diff --git a/.gitignore b/.gitignore index 7f4ab7c..da9b86d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ -.idea/* -.vscode/* -venv/* +.idea +.venv +.vscode +mcstrack.egg-info +pi-gen-detector/pi-gen +venv *.pyc -.venv/* - diff --git a/data/controller_detector_lone_local.json b/data/configuration/controller/detector_local.json similarity index 87% rename from data/controller_detector_lone_local.json rename to data/configuration/controller/detector_local.json index 0be740a..c013307 100644 --- a/data/controller_detector_lone_local.json +++ b/data/configuration/controller/detector_local.json @@ -7,5 +7,5 @@ "port": 8001 } ], - "pose_solvers": [] + "mixers": [] } diff --git a/data/configuration/controller/dual_pi.json b/data/configuration/controller/dual_pi.json new file mode 100644 index 0000000..8cecafb --- /dev/null +++ b/data/configuration/controller/dual_pi.json @@ -0,0 +1,203 @@ +{ + "startup_mode": "detecting_and_solving", + "detectors": [ + { + "label": "d101", + "ip_address": "192.168.0.101", + "port": 8001, + "camera_parameters": [], + "marker_parameters": [ + { + "key": "cornerRefinementMethod", + "value": "SUBPIX" + } + ] + }, + { + "label": "d102", + "ip_address": "192.168.0.102", + "port": 8001, + "camera_parameters": [], + "marker_parameters": [ + { + "key": "cornerRefinementMethod", + "value": "SUBPIX" + } + ] + } + ], + "mixers": [ + { + "label": "sol", + "ip_address": "192.168.0.103", + "port": 8000, + "targets": [ + { + "label": "reference", + "landmarks": [ + { "feature_label": "0$0", "x": -55.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "0$1", "x": -45.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "0$2", "x": -45.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "0$3", "x": -55.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "1$0", "x": -15.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "1$1", "x": -5.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "1$2", "x": -5.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "1$3", "x": -15.0, "y": 85.0, 
"z": 0.0 }, + { "feature_label": "2$0", "x": 25.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "2$1", "x": 35.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "2$2", "x": 35.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "2$3", "x": 25.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "3$0", "x": 65.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "3$1", "x": 75.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "3$2", "x": 75.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "3$3", "x": 65.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "4$0", "x": -75.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "4$1", "x": -65.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "4$2", "x": -65.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "4$3", "x": -75.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "5$0", "x": -35.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "5$1", "x": -25.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "5$2", "x": -25.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "5$3", "x": -35.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "6$0", "x": 5.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "6$1", "x": 15.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "6$2", "x": 15.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "6$3", "x": 5.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "7$0", "x": 45.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "7$1", "x": 55.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "7$2", "x": 55.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "7$3", "x": 45.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "8$0", "x": -55.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "8$1", "x": -45.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "8$2", "x": -45.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "8$3", "x": -55.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "9$0", "x": -15.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "9$1", "x": -5.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "9$2", "x": -5.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "9$3", "x": -15.0, "y": 45.0, 
"z": 0.0 }, + { "feature_label": "10$0", "x": 25.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "10$1", "x": 35.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "10$2", "x": 35.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "10$3", "x": 25.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "11$0", "x": 65.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "11$1", "x": 75.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "11$2", "x": 75.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "11$3", "x": 65.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "12$0", "x": -75.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "12$1", "x": -65.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "12$2", "x": -65.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "12$3", "x": -75.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "13$0", "x": -35.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "13$1", "x": -25.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "13$2", "x": -25.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "13$3", "x": -35.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "14$0", "x": 5.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "14$1", "x": 15.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "14$2", "x": 15.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "14$3", "x": 5.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "15$0", "x": 45.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "15$1", "x": 55.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "15$2", "x": 55.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "15$3", "x": 45.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "16$0", "x": -55.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "16$1", "x": -45.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "16$2", "x": -45.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "16$3", "x": -55.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "17$0", "x": -15.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "17$1", "x": -5.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "17$2", "x": -5.0, "y": 5.0, "z": 0.0 }, + { "feature_label": 
"17$3", "x": -15.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "18$0", "x": 25.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "18$1", "x": 35.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "18$2", "x": 35.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "18$3", "x": 25.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "19$0", "x": 65.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "19$1", "x": 75.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "19$2", "x": 75.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "19$3", "x": 65.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "20$0", "x": -75.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "20$1", "x": -65.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "20$2", "x": -65.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "20$3", "x": -75.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "21$0", "x": -35.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "21$1", "x": -25.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "21$2", "x": -25.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "21$3", "x": -35.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "22$0", "x": 5.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "22$1", "x": 15.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "22$2", "x": 15.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "22$3", "x": 5.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "23$0", "x": 45.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "23$1", "x": 55.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "23$2", "x": 55.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "23$3", "x": 45.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "24$0", "x": -55.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "24$1", "x": -45.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "24$2", "x": -45.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "24$3", "x": -55.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "25$0", "x": -15.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "25$1", "x": -5.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "25$2", "x": -5.0, "y": 
-35.0, "z": 0.0 }, + { "feature_label": "25$3", "x": -15.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "26$0", "x": 25.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "26$1", "x": 35.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "26$2", "x": 35.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "26$3", "x": 25.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "27$0", "x": 65.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "27$1", "x": 75.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "27$2", "x": 75.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "27$3", "x": 65.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "28$0", "x": -75.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "28$1", "x": -65.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "28$2", "x": -65.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "28$3", "x": -75.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "29$0", "x": -35.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "29$1", "x": -25.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "29$2", "x": -25.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "29$3", "x": -35.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "30$0", "x": 5.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "30$1", "x": 15.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "30$2", "x": 15.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "30$3", "x": 5.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "31$0", "x": 45.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "31$1", "x": 55.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "31$2", "x": 55.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "31$3", "x": 45.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "32$0", "x": -55.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "32$1", "x": -45.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "32$2", "x": -45.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "32$3", "x": -55.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "33$0", "x": -15.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "33$1", "x": -5.0, "y": 
-65.0, "z": 0.0 }, + { "feature_label": "33$2", "x": -5.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "33$3", "x": -15.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "34$0", "x": 25.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "34$1", "x": 35.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "34$2", "x": 35.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "34$3", "x": 25.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "35$0", "x": 65.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "35$1", "x": 75.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "35$2", "x": 75.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "35$3", "x": 65.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "36$0", "x": -75.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "36$1", "x": -65.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "36$2", "x": -65.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "36$3", "x": -75.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "37$0", "x": -35.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "37$1", "x": -25.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "37$2", "x": -25.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "37$3", "x": -35.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "38$0", "x": 5.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "38$1", "x": 15.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "38$2", "x": 15.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "38$3", "x": 5.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "39$0", "x": 45.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "39$1", "x": 55.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "39$2", "x": 55.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "39$3", "x": 45.0, "y": -95.0, "z": 0.0 } + ] + } + ] + } + ] +} diff --git a/data/measure_detector_to_reference_config.json b/data/configuration/controller/measure_detector_to_reference_config.json similarity index 99% rename from data/measure_detector_to_reference_config.json rename to 
data/configuration/controller/measure_detector_to_reference_config.json index 12b653b..1c8f817 100644 --- a/data/measure_detector_to_reference_config.json +++ b/data/configuration/controller/measure_detector_to_reference_config.json @@ -26,7 +26,7 @@ ] } ], - "pose_solvers": [ + "mixers": [ { "label": "ps", "ip_address": "127.0.0.1", diff --git a/data/output_measure_detector_to_reference_config.json b/data/configuration/controller/output_measure_detector_to_reference_config.json similarity index 99% rename from data/output_measure_detector_to_reference_config.json rename to data/configuration/controller/output_measure_detector_to_reference_config.json index 962a98d..56ef648 100644 --- a/data/output_measure_detector_to_reference_config.json +++ b/data/configuration/controller/output_measure_detector_to_reference_config.json @@ -62,7 +62,7 @@ ] } ], - "pose_solvers": [ + "mixers": [ { "label": "ps", "ip_address": "127.0.0.1", diff --git a/data/slicer_test_config_1.json b/data/configuration/controller/slicer_test_config_1.json similarity index 99% rename from data/slicer_test_config_1.json rename to data/configuration/controller/slicer_test_config_1.json index f3971d6..07aef26 100644 --- a/data/slicer_test_config_1.json +++ b/data/configuration/controller/slicer_test_config_1.json @@ -125,7 +125,7 @@ } } ], - "pose_solvers": [ + "mixers": [ { "label": "sol", "ip_address": "127.0.0.1", diff --git a/data/configuration/detector/opencv_aruco.json b/data/configuration/detector/opencv_aruco.json new file mode 100644 index 0000000..5bf4c83 --- /dev/null +++ b/data/configuration/detector/opencv_aruco.json @@ -0,0 +1,19 @@ +{ + "detector_label": "detector_01", + "intrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/intrinsic/" + } + }, + "camera": { + "implementation": "opencv_capture_device", + "configuration": { + "capture_device": 0 + } + }, + "annotator": { + "implementation": "aruco_opencv", + "configuration": 
{} + } +} diff --git a/data/configuration/detector/rpicam_aruco.json b/data/configuration/detector/rpicam_aruco.json new file mode 100644 index 0000000..8cdd31b --- /dev/null +++ b/data/configuration/detector/rpicam_aruco.json @@ -0,0 +1,17 @@ +{ + "detector_label": "detector_01", + "intrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/intrinsic/" + } + }, + "camera": { + "implementation": "picamera2", + "configuration": {} + }, + "annotator": { + "implementation": "aruco_opencv", + "configuration": {} + } +} diff --git a/data/configuration/mixer/aruco.json b/data/configuration/mixer/aruco.json new file mode 100644 index 0000000..85ef7d2 --- /dev/null +++ b/data/configuration/mixer/aruco.json @@ -0,0 +1,9 @@ +{ + "mixer_label": "mixer_01", + "extrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/extrinsic/" + } + } +} diff --git a/data/controller_calibration_config.json b/data/controller_calibration_config.json deleted file mode 100644 index c6c5b43..0000000 --- a/data/controller_calibration_config.json +++ /dev/null @@ -1,384 +0,0 @@ -{ - "startup_mode": "detecting_and_solving", - "detectors": [ - { - "label": "det1", - "ip_address": "127.0.0.1", - "port": 8001 - } - ], - "pose_solvers": [ - { - "label": "sol", - "ip_address": "127.0.0.1", - "port": 8000, - "targets": [ - { - "target_id": "reference", - "markers": [ - { - "marker_id": "0", - "points": [ - [-55.0,95.0,0.0], - [-45.0,95.0,0.0], - [-45.0,85.0,0.0], - [-55.0,85.0,0.0] - ] - }, - { - "marker_id": "1", - "points": [ - [-15.0,95.0,0.0], - [-5.0,95.0,0.0], - [-5.0,85.0,0.0], - [-15.0,85.0,0.0] - ] - }, - { - "marker_id": "2", - "points": [ - [25.0,95.0,0.0], - [35.0,95.0,0.0], - [35.0,85.0,0.0], - [25.0,85.0,0.0] - ] - }, - { - "marker_id": "3", - "points": [ - [65.0,95.0,0.0], - [75.0,95.0,0.0], - [75.0,85.0,0.0], - [65.0,85.0,0.0] - ] - }, - { - "marker_id": "4", - "points": [ - 
[-75.0,75.0,0.0], - [-65.0,75.0,0.0], - [-65.0,65.0,0.0], - [-75.0,65.0,0.0] - ] - }, - { - "marker_id": "5", - "points": [ - [-35.0,75.0,0.0], - [-25.0,75.0,0.0], - [-25.0,65.0,0.0], - [-35.0,65.0,0.0] - ] - }, - { - "marker_id": "6", - "points": [ - [5.0,75.0,0.0], - [15.0,75.0,0.0], - [15.0,65.0,0.0], - [5.0,65.0,0.0] - ] - }, - { - "marker_id": "7", - "points": [ - [45.0,75.0,0.0], - [55.0,75.0,0.0], - [55.0,65.0,0.0], - [45.0,65.0,0.0] - ] - }, - { - "marker_id": "8", - "points": [ - [-55.0,55.0,0.0], - [-45.0,55.0,0.0], - [-45.0,45.0,0.0], - [-55.0,45.0,0.0] - ] - }, - { - "marker_id": "9", - "points": [ - [-15.0,55.0,0.0], - [-5.0,55.0,0.0], - [-5.0,45.0,0.0], - [-15.0,45.0,0.0] - ] - }, - { - "marker_id": "10", - "points": [ - [25.0,55.0,0.0], - [35.0,55.0,0.0], - [35.0,45.0,0.0], - [25.0,45.0,0.0] - ] - }, - { - "marker_id": "11", - "points": [ - [65.0,55.0,0.0], - [75.0,55.0,0.0], - [75.0,45.0,0.0], - [65.0,45.0,0.0] - ] - }, - { - "marker_id": "12", - "points": [ - [-75.0,35.0,0.0], - [-65.0,35.0,0.0], - [-65.0,25.0,0.0], - [-75.0,25.0,0.0] - ] - }, - { - "marker_id": "13", - "points": [ - [-35.0,35.0,0.0], - [-25.0,35.0,0.0], - [-25.0,25.0,0.0], - [-35.0,25.0,0.0] - ] - }, - { - "marker_id": "14", - "points": [ - [5.0,35.0,0.0], - [15.0,35.0,0.0], - [15.0,25.0,0.0], - [5.0,25.0,0.0] - ] - }, - { - "marker_id": "15", - "points": [ - [45.0,35.0,0.0], - [55.0,35.0,0.0], - [55.0,25.0,0.0], - [45.0,25.0,0.0] - ] - }, - { - "marker_id": "16", - "points": [ - [-55.0,15.0,0.0], - [-45.0,15.0,0.0], - [-45.0,5.0,0.0], - [-55.0,5.0,0.0] - ] - }, - { - "marker_id": "17", - "points": [ - [-15.0,15.0,0.0], - [-5.0,15.0,0.0], - [-5.0,5.0,0.0], - [-15.0,5.0,0.0] - ] - }, - { - "marker_id": "18", - "points": [ - [25.0,15.0,0.0], - [35.0,15.0,0.0], - [35.0,5.0,0.0], - [25.0,5.0,0.0] - ] - }, - { - "marker_id": "19", - "points": [ - [65.0,15.0,0.0], - [75.0,15.0,0.0], - [75.0,5.0,0.0], - [65.0,5.0,0.0] - ] - }, - { - "marker_id": "20", - "points": [ - [-75.0,-5.0,0.0], - 
[-65.0,-5.0,0.0], - [-65.0,-15.0,0.0], - [-75.0,-15.0,0.0] - ] - }, - { - "marker_id": "21", - "points": [ - [-35.0,-5.0,0.0], - [-25.0,-5.0,0.0], - [-25.0,-15.0,0.0], - [-35.0,-15.0,0.0] - ] - }, - { - "marker_id": "22", - "points": [ - [5.0,-5.0,0.0], - [15.0,-5.0,0.0], - [15.0,-15.0,0.0], - [5.0,-15.0,0.0] - ] - }, - { - "marker_id": "23", - "points": [ - [45.0,-5.0,0.0], - [55.0,-5.0,0.0], - [55.0,-15.0,0.0], - [45.0,-15.0,0.0] - ] - }, - { - "marker_id": "24", - "points": [ - [-55.0,-25.0,0.0], - [-45.0,-25.0,0.0], - [-45.0,-35.0,0.0], - [-55.0,-35.0,0.0] - ] - }, - { - "marker_id": "25", - "points": [ - [-15.0,-25.0,0.0], - [-5.0,-25.0,0.0], - [-5.0,-35.0,0.0], - [-15.0,-35.0,0.0] - ] - }, - { - "marker_id": "26", - "points": [ - [25.0,-25.0,0.0], - [35.0,-25.0,0.0], - [35.0,-35.0,0.0], - [25.0,-35.0,0.0] - ] - }, - { - "marker_id": "27", - "points": [ - [65.0,-25.0,0.0], - [75.0,-25.0,0.0], - [75.0,-35.0,0.0], - [65.0,-35.0,0.0] - ] - }, - { - "marker_id": "28", - "points": [ - [-75.0,-45.0,0.0], - [-65.0,-45.0,0.0], - [-65.0,-55.0,0.0], - [-75.0,-55.0,0.0] - ] - }, - { - "marker_id": "29", - "points": [ - [-35.0,-45.0,0.0], - [-25.0,-45.0,0.0], - [-25.0,-55.0,0.0], - [-35.0,-55.0,0.0] - ] - }, - { - "marker_id": "30", - "points": [ - [5.0,-45.0,0.0], - [15.0,-45.0,0.0], - [15.0,-55.0,0.0], - [5.0,-55.0,0.0] - ] - }, - { - "marker_id": "31", - "points": [ - [45.0,-45.0,0.0], - [55.0,-45.0,0.0], - [55.0,-55.0,0.0], - [45.0,-55.0,0.0] - ] - }, - { - "marker_id": "32", - "points": [ - [-55.0,-65.0,0.0], - [-45.0,-65.0,0.0], - [-45.0,-75.0,0.0], - [-55.0,-75.0,0.0] - ] - }, - { - "marker_id": "33", - "points": [ - [-15.0,-65.0,0.0], - [-5.0,-65.0,0.0], - [-5.0,-75.0,0.0], - [-15.0,-75.0,0.0] - ] - }, - { - "marker_id": "34", - "points": [ - [25.0,-65.0,0.0], - [35.0,-65.0,0.0], - [35.0,-75.0,0.0], - [25.0,-75.0,0.0] - ] - }, - { - "marker_id": "35", - "points": [ - [65.0,-65.0,0.0], - [75.0,-65.0,0.0], - [75.0,-75.0,0.0], - [65.0,-75.0,0.0] - ] - }, - { - 
"marker_id": "36", - "points": [ - [-75.0,-85.0,0.0], - [-65.0,-85.0,0.0], - [-65.0,-95.0,0.0], - [-75.0,-95.0,0.0] - ] - }, - { - "marker_id": "37", - "points": [ - [-35.0,-85.0,0.0], - [-25.0,-85.0,0.0], - [-25.0,-95.0,0.0], - [-35.0,-95.0,0.0] - ] - }, - { - "marker_id": "38", - "points": [ - [5.0,-85.0,0.0], - [15.0,-85.0,0.0], - [15.0,-95.0,0.0], - [5.0,-95.0,0.0] - ] - }, - { - "marker_id": "39", - "points": [ - [45.0,-85.0,0.0], - [55.0,-85.0,0.0], - [55.0,-95.0,0.0], - [45.0,-95.0,0.0] - ] - } - ] - } - ] - } - ] -} diff --git a/data/detector_config.json b/data/detector_config.json deleted file mode 100644 index 5af146b..0000000 --- a/data/detector_config.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "calibrator_configuration": { - "data_path": "./example_data_path/" - }, - "camera_configuration": { - "driver": "picamera2", - "capture_device": "0" - }, - "marker_configuration": { - "method": "aruco_opencv" - } -} diff --git a/data/pose_solver_config.json b/data/pose_solver_config.json deleted file mode 100644 index 6714195..0000000 --- a/data/pose_solver_config.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "serial_identifier": "example_pose_solver" -} diff --git a/doc/glossary.md b/doc/glossary.md new file mode 100644 index 0000000..00eddb1 --- /dev/null +++ b/doc/glossary.md @@ -0,0 +1,11 @@ +# Glossary +- Annotation: A feature that is detected in an image. +- Controller: Component responsible for coordinating communication between other components. +- Detector: Component responsible for capturing images, processing them, and generating Annotations for tracking. +- Feature: Something that is identifiable either in 2D or 3D. +- Landmark: A unique feature on a Target that has its own distinct 3D coordinates. +- Pose: a position and orientation. +- Pose Solver: Component responsible for receiving Annotations and calculating Poses per Target. +- Target: A definition of something to track. Currently these consist of Landmarks. 
+ +Notes: Annotation, Feature, and Landmark are all distinct but tightly related concepts. diff --git a/doc/setup.md b/doc/setup.md index a871d27..ec07dc1 100644 --- a/doc/setup.md +++ b/doc/setup.md @@ -53,11 +53,11 @@ Please install a recent version of Visual Studio Build Tools: https://visualstud You may need to manually modify the installation and ensure the C++ workload is selected. ``` -py -3.11 -m venv venv +py -3.11 -m venv .venv cd venv/Scripts activate cd ../.. -pip install -r requirements.txt +pip install .[gui,component] ``` ### Linux @@ -70,9 +70,9 @@ You may need to install additional packages depending on your distribution. The - python3.11-dev ``` -py -3.11 -m venv venv +py -3.11 -m venv .venv source venv/bin/activate -pip install -r requirements.txt +pip install .[gui,component] ``` ### Troubleshooting diff --git a/pi-gen-detector/config b/pi-gen-detector/config new file mode 100644 index 0000000..64a39f7 --- /dev/null +++ b/pi-gen-detector/config @@ -0,0 +1,8 @@ +export IMG_NAME=mcstrack-detector +export LOCALE_DEFAULT=en_US.UTF-8 +export TIMEZONE_DEFAULT=America/Toronto +export DISABLE_FIRST_BOOT_USER_RENAME=1 +export FIRST_USER_NAME=admin +export FIRST_USER_PASS="admin" +export ENABLE_SSH=0 +export STAGE_LIST="stage0 stage1 stage2 stage6" diff --git a/pi-gen-detector/create_image.sh b/pi-gen-detector/create_image.sh new file mode 100755 index 0000000..dadc3e9 --- /dev/null +++ b/pi-gen-detector/create_image.sh @@ -0,0 +1,36 @@ +#!/bin/bash -e + + +echo "Checking dependencies..." +sudo apt install -y coreutils quilt parted qemu-user-static debootstrap zerofree zip dosfstools e2fsprogs libarchive-tools libcap2-bin grep rsync xz-utils file git curl bc gpg pigz xxd arch-test bmap-tools kmod + + + +echo "Cloning pi-gen repository..." +if [ -d "pi-gen" ]; then + rm -r pi-gen +fi +git clone --branch arm64 https://github.com/RPI-Distro/pi-gen.git + + +echo "Setting up for image creation..." 
+cp config pi-gen +cd pi-gen +touch ./stage3/SKIP +touch ./stage4/SKIP +touch ./stage5/SKIP +touch ./stage4/SKIP_IMAGES +touch ./stage5/SKIP_IMAGES +cp -r ../stage6 stage6 +chmod +x build.sh +chmod +x stage6/prerun.sh +chmod +x stage6/00_custom/01-run.sh +chmod +x stage6/00_custom/02-run-chroot.sh + + +echo "Building image..." +./build.sh +exitCode=$? +if [ $exitCode -ne 0 ]; then + echo "Exited with code ${exitCode}" ; exit -1 +fi diff --git a/pi-gen-detector/stage6/00_custom/00-packages b/pi-gen-detector/stage6/00_custom/00-packages new file mode 100644 index 0000000..2194977 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/00-packages @@ -0,0 +1,9 @@ +build-essential +libglib2.0-dev +libglu1-mesa-dev +libgl1-mesa-dev +libgtk-3-dev +python3-picamera2 +python3-pip +python3-venv +ufw diff --git a/pi-gen-detector/stage6/00_custom/01-run.sh b/pi-gen-detector/stage6/00_custom/01-run.sh new file mode 100755 index 0000000..e836df3 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/01-run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +install -d "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp -r "${STAGE_DIR}/../../../data" "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp -r "${STAGE_DIR}/../../../src" "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp "${STAGE_DIR}/../../../pyproject.toml" "${ROOTFS_DIR}/home/admin/MCSTrack/" +install -d "${ROOTFS_DIR}/usr/local/bin/" +install "${STAGE_DIR}/00_custom/files/mcstrack_startup" "${ROOTFS_DIR}/usr/local/bin/" +chmod +x "${ROOTFS_DIR}/usr/local/bin/mcstrack_startup" diff --git a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh new file mode 100755 index 0000000..da3cfe2 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh @@ -0,0 +1,17 @@ +#!/bin/bash +sudo apt update +sudo apt upgrade -y + +# Firewall setup +sudo ufw enable +sudo ufw allow 8001 + +# Python setup +cd /home/admin/MCSTrack +python3 -m venv --system-site-packages .venv +source .venv/bin/activate +pip install 
--break-system-packages .[component] +deactivate + +# Run startup script on boot +sudo echo "@reboot root /usr/local/bin/mcstrack_startup >> /home/admin/mcstrack_log.log" > /etc/cron.d/startup diff --git a/pi-gen-detector/stage6/00_custom/files/mcstrack_startup b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup new file mode 100644 index 0000000..595d1b9 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup @@ -0,0 +1,5 @@ +#!/bin/bash +cd /home/admin/MCSTrack +source .venv/bin/activate +export MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH="/home/admin/MCSTrack/data/configuration/detector/rpicam_aruco.json" +python -m src.main_detector diff --git a/pi-gen-detector/stage6/EXPORT_IMAGE b/pi-gen-detector/stage6/EXPORT_IMAGE new file mode 100644 index 0000000..9008606 --- /dev/null +++ b/pi-gen-detector/stage6/EXPORT_IMAGE @@ -0,0 +1 @@ +IMG_SUFFIX="-mcs" diff --git a/pi-gen-detector/stage6/prerun.sh b/pi-gen-detector/stage6/prerun.sh new file mode 100755 index 0000000..5d99308 --- /dev/null +++ b/pi-gen-detector/stage6/prerun.sh @@ -0,0 +1,4 @@ +#!/bin/bash -e +if [ ! -d "${ROOTFS_DIR}" ]; then + copy_previous +fi diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..4b7271e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,42 @@ +[build-system] +requires = ["setuptools>=65.5"] + +[project] +name = "mcstrack" +version = "0.0.0" +authors = [ + { name="Thomas Vaughan", email="thomas.vanghan@cnrc-nrc.gc.ca" }, + { name="Keiran Barr", email="keiranbarr7@gmail.com" }, + { name="David Zhou", email="david.zhou.0110@gmail.com" }, + { name="Andras Lasso", email="lasso@queensu.ca" } +] +description = "Multi camera spatial tracking." 
+readme = "README.md" +requires-python = ">=3.11" +license = { text = "MIT" } +classifiers = [ + "Programming Language :: Python :: 3", + "License :: MIT" +] +dependencies = [ + "fastapi~=0.115.13", + "hjson~=3.1.0", + "numpy~=2.3.2", + "numpy-stl~=3.2.0", + "opencv-contrib-python~=4.11.0.86", + "pydantic~=2.11.7", + "scipy~=1.15.3", + "websockets~=15.0.1" +] + +[project.optional-dependencies] +gui = [ + "PyOpenGL~=3.1.7", + "wxpython~=4.2.3" +] +component = [ + "uvicorn[standard]~=0.35.0" +] + +[tool.setuptools] +py-modules = ["mcstrack"] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ca402d9..0000000 --- a/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -fastapi -hjson -numpy~=1.26 -numpy-stl~=3.1 -opencv-contrib-python==4.5.5.64 -pydantic>=2 -PyOpenGL==3.1.7 -scipy -uvicorn[standard] -websocket -websockets~=13.1 -wxpython - diff --git a/setup/create_image.sh b/setup/create_image.sh index 9b2a906..b27ba2b 100755 --- a/setup/create_image.sh +++ b/setup/create_image.sh @@ -69,9 +69,7 @@ chmod 777 MCSTrack pushd MCSTrack python3 -m venv .venv --system-site-packages source .venv/bin/activate -# Exclude the following packages from this install, because the detector doesn't need it -grep -ivE "wxasync|wxpython|PyOpenGL==3.1.7|PyOpenGL-accelerate==3.1.7" requirements.txt> edited_requirements.txt -pip3 install --break-system-packages -r edited_requirements.txt +pip3 install --break-system-packages .[component] popd # Create startup script diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 653f867..dc4b56c 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -1,18 +1,18 @@ +from .structures import PoseLocation, Marker, TargetBoard +from src.common import Annotation, Matrix4x4, Pose, PoseSolver +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon +from collections import defaultdict import datetime import json import os import numpy as np - 
-from collections import defaultdict from typing import Final -from .utils import BoardBuilderPoseSolver -from .structures import PoseLocation -from src.common.structures import Pose, MarkerSnapshot, MarkerCorners, Matrix4x4 -from src.common.structures import Marker, TargetBoard _HOMOGENEOUS_POINT_COORD: Final[int] = 4 TESTED_BOARD_NAME: str = 'top_data.json' # If collecting data for repeatability test, specify the file name. cube_data.json, planar_data.json, top_data.json +MARKER_SIZE_MM: Final[float] = 10.0 + class BoardBuilder: _detector_poses_median: dict[str, PoseLocation] @@ -39,7 +39,7 @@ def __init__(self, marker_size): self.marker_size = marker_size # in mm self._index_to_marker_uuid = dict() self._index_to_marker_id = dict() - self.pose_solver = BoardBuilderPoseSolver() + self.pose_solver = PoseSolver() # matrix init self._matrix_id_index = 0 @@ -114,13 +114,13 @@ def _filter_markers_appearing_in_multiple_detectors(data): label_counts = defaultdict(int) for snapshots in data.values(): for snapshot in snapshots: - label_counts[snapshot.label] += 1 + label_counts[snapshot.feature_label] += 1 filtered_data = {} for key, snapshots in data.items(): filtered_snapshots = [ snapshot for snapshot in snapshots - if label_counts[snapshot.label] >= 2] + if label_counts[snapshot.feature_label] >= 2] if filtered_snapshots: filtered_data[key] = filtered_snapshots return filtered_data @@ -144,31 +144,28 @@ def _find_matrix_input_index(self, pose_uuid, other_pose_uuid): return pose_index, other_pose_index - def _solve_pose(self, detector_data: dict[str, list[MarkerSnapshot]], timestamp: datetime.datetime): + def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: datetime.datetime): """ Given marker ids and its corner locations, find its pose """ - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: for marker_snapshot in detector_data[detector_name]: - if 
marker_snapshot.label not in list(self._index_to_marker_id.values()): - self.pose_solver.add_target_marker(int(marker_snapshot.label)) + if marker_snapshot.feature_label not in list(self._index_to_marker_id.values()): + self.pose_solver.add_target(ArucoOpenCVCommon.target_from_marker_parameters( + base_label=str(int(marker_snapshot.feature_label)), + marker_size=MARKER_SIZE_MM)) self._expand_relative_pose_matrix() - self._index_to_marker_id[self._matrix_id_index] = marker_snapshot.label + self._index_to_marker_id[self._matrix_id_index] = marker_snapshot.feature_label self._matrix_id_index += 1 for detector_name in detector_data: - for marker_snapshot in detector_data[detector_name]: - corners_list: list[list[float]] = [] # Indexed as [point][coordinate] - for corner in marker_snapshot.corner_image_points: - corners_list.append([corner.x_px, corner.y_px]) - marker_corners = MarkerCorners( + # assumes 4 corners per marker + for i in range(0, len(detector_data[detector_name]), 4): + self.pose_solver.add_detector_frame( detector_label=detector_name, - marker_id=int(marker_snapshot.label), - points=corners_list, - timestamp=timestamp - ) - self.pose_solver.add_marker_corners([marker_corners]) + frame_annotations=detector_data[detector_name], + frame_timestamp_utc=timestamp) - target_poses = self.pose_solver.get_target_poses() + _, target_poses = self.pose_solver.get_poses() self.target_poses = target_poses for pose in target_poses: if pose.target_id not in list(self._index_to_marker_uuid.values()): @@ -200,17 +197,16 @@ def _write_corners_dict_to_repeatability_test_file(self, corners_dict): json.dump(data, file, indent=4) @staticmethod - def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerSnapshot]], data_description: str): + def _write_detector_data_to_recording_file(detector_data: dict[str, list[Annotation]], data_description: str): formatted_data = {} - timestamp = datetime.datetime.utcnow().isoformat() + timestamp = 
datetime.datetime.now(tz=datetime.timezone.utc).isoformat() for detector_name, snapshots in detector_data.items(): formatted_data[detector_name] = [] for snapshot in snapshots: snapshot_data = { - "label": snapshot.label, - "corner_image_points": [{"x_px": pt.x_px, "y_px": pt.y_px} for pt in snapshot.corner_image_points], - "timestamp": timestamp - } + "label": snapshot.feature_label, + "corner_image_points": [snapshot.x_px, snapshot.y_px], + "timestamp": timestamp} formatted_data[detector_name].append(snapshot_data) current_dir = os.path.dirname(__file__) @@ -241,26 +237,20 @@ def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerS json.dump(existing_data, f, indent=4) # public methods - def locate_reference_board(self, detector_data: dict[str, list[MarkerSnapshot]]): + def locate_reference_board(self, detector_data: dict[str, list[Annotation]]): # self._write_detector_data_to_recording_file(detector_data, "LOCATE REFERENCE DATA") if all(isinstance(v, list) and len(v) == 0 for v in detector_data.values()): return self.detector_poses = [] - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: - for marker_snapshot in detector_data[detector_name]: - corners_list: list[list[float]] = [] - for corner in marker_snapshot.corner_image_points: - corners_list.append([corner.x_px, corner.y_px]) - marker_corners = MarkerCorners( + for i in range(0, len(detector_data[detector_name]), 4): + self.pose_solver.add_detector_frame( detector_label=detector_name, - marker_id=int(marker_snapshot.label), - points=corners_list, - timestamp=timestamp - ) - self.pose_solver.add_marker_corners([marker_corners]) + frame_annotations=detector_data[detector_name], + frame_timestamp_utc=timestamp) - new_detector_poses = self.pose_solver.get_detector_poses() + new_detector_poses, _ = self.pose_solver.get_poses() for pose in new_detector_poses: if pose.target_id not in 
self._detector_poses_median: self._detector_poses_median[pose.target_id] = PoseLocation(pose.target_id) @@ -268,20 +258,23 @@ def locate_reference_board(self, detector_data: dict[str, list[MarkerSnapshot]]) pose.object_to_reference_matrix.as_numpy_array(), timestamp.isoformat()) for label in self._detector_poses_median: - pose = Pose( + detector_to_reference: Matrix4x4 = \ + self._detector_poses_median[label].get_median_pose().object_to_reference_matrix + self.pose_solver.set_extrinsic_matrix( + detector_label=label, + transform_to_reference=detector_to_reference) + self.detector_poses.append(Pose( target_id=label, - object_to_reference_matrix=self._detector_poses_median[label].get_median_pose().object_to_reference_matrix, - solver_timestamp_utc_iso8601=timestamp.isoformat()) - self.detector_poses.append(pose) - self.pose_solver.set_detector_poses(self.detector_poses) + object_to_reference_matrix=detector_to_reference, + solver_timestamp_utc_iso8601=timestamp.isoformat())) - def collect_data(self, detector_data: dict[str, list[MarkerSnapshot]]): + def collect_data(self, detector_data: dict[str, list[Annotation]]): """ Collects data of relative position and is entered in matrix. 
Returns a dictionary of its corners""" # self._write_detector_data_to_recording_file(detector_data, "COLLECTION DATA") detector_data = self._filter_markers_appearing_in_multiple_detectors(detector_data) if all(isinstance(v, list) and len(v) == 0 for v in detector_data.values()): return - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) corners_dict = {} self.target_poses = [] self._solve_pose(detector_data, timestamp) @@ -349,7 +342,7 @@ def build_board(self): pose = Pose( target_id=marker_id, object_to_reference_matrix=Matrix4x4.from_numpy_array(np.array(T)), - solver_timestamp_utc_iso8601=str(datetime.datetime.utcnow())) + solver_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) self.target_poses.append(pose) corners = self._calculate_corners_location(T, self.local_corners) predicted_corners[marker_id] = corners diff --git a/src/board_builder/utils/graph_search.py b/src/board_builder/graph_search.py similarity index 99% rename from src/board_builder/utils/graph_search.py rename to src/board_builder/graph_search.py index a7c52aa..a2f8256 100644 --- a/src/board_builder/utils/graph_search.py +++ b/src/board_builder/graph_search.py @@ -1,8 +1,8 @@ +from src.board_builder.structures import MatrixNode, PoseLocation +from src.common import Matrix4x4 from collections import deque, defaultdict from typing import Dict, List, Tuple -from src.board_builder.structures import MatrixNode, PoseLocation -from src.common.structures import Matrix4x4 def create_graph( relative_pose_matrix: List[List[PoseLocation | None]], diff --git a/src/board_builder/structures.py b/src/board_builder/structures.py new file mode 100644 index 0000000..9b5e603 --- /dev/null +++ b/src/board_builder/structures.py @@ -0,0 +1,145 @@ +from src.common import MathUtils, Matrix4x4, Pose +import abc +import datetime +import numpy as np +from pydantic import BaseModel, Field, PrivateAttr +from scipy.spatial.transform import 
Rotation as R + + +class MatrixNode: + def __init__(self, node_id: str): + self.id = node_id + self.neighbours = [] + self.weights = {} + + def add_neighbour(self, neighbour_node, weight: int): + self.neighbours.append(neighbour_node) + self.weights[neighbour_node.id] = weight + + +class PoseLocation: + + _id: str + _timestamp: str + _TMatrix: np.ndarray + _RMAT_list: list + _TVEC_list: list + + def __init__(self, object_id): + self._id = object_id + self._timestamp = str(datetime.datetime.now()) + + self._TMatrix = np.eye(4) + self._RMAT_list = [] # Rotation matrix + self._TVEC_list = [] # Translation vector + + self.frame_count = 0 + + def add_matrix(self, transformation_matrix: Matrix4x4, timestamp: str): + self._timestamp = timestamp + + self._RMAT_list.append(transformation_matrix[:3, :3]) + self._TVEC_list.append(transformation_matrix[:3, 3]) + + avg_translation = np.mean(self._TVEC_list, axis=0) + + quaternions = [R.from_matrix(rot).as_quat(canonical=True) for rot in self._RMAT_list] + quaternions = [[float(quaternion[i]) for i in range(0, 4)] for quaternion in quaternions] + avg_quat = MathUtils.average_quaternion(quaternions) + avg_rotation = R.from_quat(avg_quat).as_matrix() + + self._TMatrix[:3, :3] = avg_rotation + self._TMatrix[:3, 3] = avg_translation + + def get_matrix(self): + return self._TMatrix + + def get_average_pose(self): + pose = Pose( + target_id=self._id, + object_to_reference_matrix=Matrix4x4.from_numpy_array(self._TMatrix), + solver_timestamp_utc_iso8601=self._timestamp + ) + return pose + + def get_median_pose(self): + if not self._RMAT_list or not self._TVEC_list: + raise ValueError("No matrices available to compute the median.") + + rmat_array = np.array(self._RMAT_list) + tvec_array = np.array(self._TVEC_list) + + median_rmat = np.median(rmat_array, axis=0) + median_tvec = np.median(tvec_array, axis=0) + + median_transformation_matrix = np.eye(4) + median_transformation_matrix[:3, :3] = median_rmat + median_transformation_matrix[:3, 
3] = median_tvec + + pose = Pose( + target_id=self._id, + object_to_reference_matrix=Matrix4x4.from_numpy_array(median_transformation_matrix), + solver_timestamp_utc_iso8601=self._timestamp + ) + + return pose + + +class Marker(BaseModel): + marker_id: str = Field() + marker_size: float | None = Field(default=None) + points: list[list[float]] | None = Field(default=None) + + def get_points_internal(self) -> list[list[float]]: + # Use the TargetBase.get_points() instead. + if self.points is None: + if self.marker_size is None: + raise RuntimeError("TargetMarker defined with neither marker_size nor points.") + half_width = self.marker_size / 2.0 + self.points = [ + [-half_width, half_width, 0.0], + [half_width, half_width, 0.0], + [half_width, -half_width, 0.0], + [-half_width, -half_width, 0.0]] + return self.points + + +class TargetBase(BaseModel, abc.ABC): + label: str = Field() + + @abc.abstractmethod + def get_marker_ids(self) -> list[str]: ... + + @abc.abstractmethod + def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: ... + + @abc.abstractmethod + def get_points(self) -> list[list[float]]: ... 
+ + +class TargetBoard(TargetBase): + markers: list[Marker] = Field() + _marker_dict: None | dict[str, Marker] = PrivateAttr() + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._marker_dict = None + + def get_marker_ids(self) -> list[str]: + return [marker.marker_id for marker in self.markers] + + def get_points(self) -> list[list[float]]: + points = list() + for marker in self.markers: + points += marker.get_points_internal() + return points + + def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: + if self._marker_dict is None: + self._marker_dict = dict() + for marker in self.markers: + self._marker_dict[marker.marker_id] = marker + if marker_id not in self._marker_dict: + raise IndexError(f"marker_id {marker_id} is not in target {self.label}") + return self._marker_dict[marker_id].points + diff --git a/src/board_builder/structures/__init__.py b/src/board_builder/structures/__init__.py deleted file mode 100644 index daa65e3..0000000 --- a/src/board_builder/structures/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .marker_ray_set import MarkerRaySet -from .pose_data import PoseData -from .pose_location import PoseLocation -from .target_base import Target -from .target_marker import TargetMarker -from .matrix_node import MatrixNode diff --git a/src/board_builder/structures/marker_corners.py b/src/board_builder/structures/marker_corners.py deleted file mode 100644 index 8ef2867..0000000 --- a/src/board_builder/structures/marker_corners.py +++ /dev/null @@ -1,21 +0,0 @@ -import datetime - - -# TODO: Merge into a similar structure in common -class MarkerCorners: - detector_label: str - marker_id: int - points: list[list[float]] - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp diff --git 
a/src/board_builder/structures/marker_ray_set.py b/src/board_builder/structures/marker_ray_set.py deleted file mode 100644 index 81d651b..0000000 --- a/src/board_builder/structures/marker_ray_set.py +++ /dev/null @@ -1,36 +0,0 @@ -from src.common.structures import Matrix4x4 -import datetime -from pydantic import BaseModel, Field - - -class MarkerRaySet(BaseModel): - marker_id: int = Field() - image_points: list[list[float]] = Field() # image positions of marker corners. Size 4. - image_timestamp: datetime.datetime = Field() - ray_origin_reference: list[float] = Field() # Shared origin for all rays (same detector) - ray_directions_reference: list[list[float]] = Field() # Size 4 (one for each image point) - detector_label: str = Field() - detector_to_reference_matrix: Matrix4x4 = Field() - - @staticmethod - def age_seconds( - marker_ray_set, - query_timestamp: datetime.datetime - ): - return (query_timestamp - marker_ray_set.image_timestamp).total_seconds() - - @staticmethod - def newest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.now() - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value - - @staticmethod - def oldest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.utcfromtimestamp(0) - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value diff --git a/src/board_builder/structures/matrix_node.py b/src/board_builder/structures/matrix_node.py deleted file mode 100644 index 2c83f16..0000000 --- a/src/board_builder/structures/matrix_node.py +++ /dev/null @@ -1,9 +0,0 @@ -class MatrixNode: - def __init__(self, node_id: str): - self.id = node_id - self.neighbours = [] - self.weights = {} - - def add_neighbour(self, neighbour_node, weight: int): - 
self.neighbours.append(neighbour_node) - self.weights[neighbour_node.id] = weight diff --git a/src/board_builder/structures/pose_data.py b/src/board_builder/structures/pose_data.py deleted file mode 100644 index 13205ba..0000000 --- a/src/board_builder/structures/pose_data.py +++ /dev/null @@ -1,24 +0,0 @@ -from .marker_ray_set import MarkerRaySet -from src.common.structures import Matrix4x4 -import datetime -from pydantic import BaseModel, Field - - -# TODO: Merge/replace this with pose under common data structures -class PoseData(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - ray_sets: list[MarkerRaySet] - - def newest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.newest_timestamp_in_list(self.ray_sets) - - def oldest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.oldest_timestamp_in_list(self.ray_sets) - - @staticmethod - def age_seconds( - pose, - query_timestamp: datetime.datetime - ) -> float: - return (query_timestamp - pose.oldest_timestamp()).total_seconds() diff --git a/src/board_builder/structures/pose_location.py b/src/board_builder/structures/pose_location.py deleted file mode 100644 index 8132de6..0000000 --- a/src/board_builder/structures/pose_location.py +++ /dev/null @@ -1,76 +0,0 @@ -import datetime - -import numpy as np -from numpy._typing import NDArray -from typing import Any -from scipy.spatial.transform import Rotation as R -from src.pose_solver.util.average_quaternion import average_quaternion -from src.common.structures import Matrix4x4, Pose - - -class PoseLocation: - - _id: str - _timestamp: str - _TMatrix: NDArray[Any] - _RMAT_list: list - _TVEC_list: list - - def __init__(self, object_id): - self._id = object_id - self._timestamp = str(datetime.datetime.now()) - - self._TMatrix = np.eye(4) - self._RMAT_list = [] # Rotation matrix - self._TVEC_list = [] # Translation vector - - self.frame_count = 0 - - def add_matrix(self, transformation_matrix: Matrix4x4, timestamp: 
str): - self._timestamp = timestamp - - self._RMAT_list.append(transformation_matrix[:3, :3]) - self._TVEC_list.append(transformation_matrix[:3, 3]) - - avg_translation = np.mean(self._TVEC_list, axis=0) - - quaternions = [R.from_matrix(rot).as_quat(canonical=True) for rot in self._RMAT_list] - quaternions = [[float(quaternion[i]) for i in range(0, 4)] for quaternion in quaternions] - avg_quat = average_quaternion(quaternions) - avg_rotation = R.from_quat(avg_quat).as_matrix() - - self._TMatrix[:3, :3] = avg_rotation - self._TMatrix[:3, 3] = avg_translation - - def get_matrix(self): - return self._TMatrix - - def get_average_pose(self): - pose = Pose( - target_id=self._id, - object_to_reference_matrix=Matrix4x4.from_numpy_array(self._TMatrix), - solver_timestamp_utc_iso8601=self._timestamp - ) - return pose - - def get_median_pose(self): - if not self._RMAT_list or not self._TVEC_list: - raise ValueError("No matrices available to compute the median.") - - rmat_array = np.array(self._RMAT_list) - tvec_array = np.array(self._TVEC_list) - - median_rmat = np.median(rmat_array, axis=0) - median_tvec = np.median(tvec_array, axis=0) - - median_transformation_matrix = np.eye(4) - median_transformation_matrix[:3, :3] = median_rmat - median_transformation_matrix[:3, 3] = median_tvec - - pose = Pose( - target_id=self._id, - object_to_reference_matrix=Matrix4x4.from_numpy_array(median_transformation_matrix), - solver_timestamp_utc_iso8601=self._timestamp - ) - - return pose diff --git a/src/board_builder/structures/target_base.py b/src/board_builder/structures/target_base.py deleted file mode 100644 index 1e2e25c..0000000 --- a/src/board_builder/structures/target_base.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import BaseModel - - -class Target(BaseModel): - pass diff --git a/src/board_builder/structures/target_marker.py b/src/board_builder/structures/target_marker.py deleted file mode 100644 index ccb6184..0000000 --- a/src/board_builder/structures/target_marker.py +++ 
/dev/null @@ -1,7 +0,0 @@ -from .target_base import Target -from pydantic import Field - - -class TargetMarker(Target): - marker_id: int = Field() - marker_size: float = Field() diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index 7061a04..c68ecf5 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -1,21 +1,17 @@ -import os - -import numpy as np -import importlib -import json - -from structures import AccuracyTestParameters from src.board_builder.board_builder import BoardBuilder -from src.common.util import register_corresponding_points -from src.common.structures import \ - MarkerCornerImagePoint, \ - MarkerSnapshot, \ - TargetBoard, \ - Marker -from utils import \ +from src.common import Annotation, MathUtils +from src.board_builder.structures import \ + Marker, \ + TargetBoard +from .structures import AccuracyTestParameters +from .utils import \ generate_virtual_snapshots, \ generate_data, \ graph_renderer +import json +import numpy as np +import importlib +import os class AccuracyTest: @@ -49,12 +45,10 @@ def _add_noise_to_corners(self, data): # Apply noise for i, corner in enumerate(marker_snapshot.corner_image_points): - noisy_corner_x = corner.x_px + noise[i * 2] - noisy_corner_y = corner.y_px + noise[i * 2 + 1] - noisy_corners.append(MarkerCornerImagePoint(x_px=noisy_corner_x, y_px=noisy_corner_y)) - - noisy_marker_snapshot = MarkerSnapshot(label=marker_snapshot.label, corner_image_points=noisy_corners) - noisy_marker_snapshots.append(noisy_marker_snapshot) + noisy_marker_snapshots.append(Annotation( + feature_label=f"{marker_snapshot.feature_label}_{i}", + x_px=corner.x_px + noise[i * 2], + y_px=corner.y_px + noise[i * 2 + 1])) noisy_data[detector_name] = noisy_marker_snapshots return noisy_data @@ -72,7 +66,7 @@ def transform_point(point, matrix): simulated_points: list[list[float]] = simulated_board.get_points() # Get the 
transformation matrix - transformation_matrix = register_corresponding_points( + transformation_matrix = MathUtils.register_corresponding_points( point_set_from=target_points, point_set_to=simulated_points, use_oomori_mirror_fix=False) @@ -87,7 +81,7 @@ def transform_point(point, matrix): Marker(marker_id=marker.marker_id, marker_size=marker.marker_size, points=aligned_points)) # Return the aligned TargetBoard - return TargetBoard(target_id=target_board.target_id, markers=aligned_markers) + return TargetBoard(target_id=target_board.label, markers=aligned_markers) @staticmethod def _calculate_rms_error_of_two_corner_dataset( @@ -148,7 +142,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll { detector_name: [ { - "label": snapshot.label, + "label": snapshot.feature_label, "corner_image_points": [{"x_px": pt.x_px, "y_px": pt.y_px} for pt in snapshot.corner_image_points] } @@ -165,7 +159,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll "generated_board_poses": snapshots_serializable, "projected_2D_points": two_dimension_collection_data_serializable, "predicted_board": { - "target_id": predicted_board.target_id, + "target_id": predicted_board.label, "markers": [ { "marker_id": marker.marker_id, @@ -174,7 +168,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll ] }, "simulated_board": { - "target_id": simulated_board.target_id, + "target_id": simulated_board.label, "markers": [ { "marker_id": marker.marker_id, diff --git a/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py b/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py index 0b42bec..a900fe4 100644 --- a/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py +++ b/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py @@ -1,6 +1,6 @@ import numpy as np -from src.common.structures import Matrix4x4, Pose, IntrinsicParameters +from src.common 
import Matrix4x4, Pose, IntrinsicParameters class AccuracyTestParameters: diff --git a/src/board_builder/test/accuracy/utils/generate_data.py b/src/board_builder/test/accuracy/utils/generate_data.py index dcd957b..5d87a68 100644 --- a/src/board_builder/test/accuracy/utils/generate_data.py +++ b/src/board_builder/test/accuracy/utils/generate_data.py @@ -1,11 +1,8 @@ -import math -import random import numpy as np from src.board_builder.test.accuracy.structures import AccuracyTestParameters from .projection import projection from collections import defaultdict - -from src.common.structures import MarkerSnapshot, MarkerCornerImagePoint +from src.common.structures import Annotation def find_z_axis_intersection(matrix4x4): @@ -76,7 +73,7 @@ def generate_data(board_coordinates, detector_poses, remove_markers_out_of_frame Also trims the data to remove any occluded marker """ parameters = AccuracyTestParameters() - collection_data: dict[str, list[MarkerSnapshot]] = {} + collection_data: dict[str, list[Annotation]] = {} occluded_markers = defaultdict(list, {pose.target_id: [] for pose in detector_poses}) # A list of markers that are occluded (self occlusion or perpendicular) # Collect data @@ -100,13 +97,12 @@ def generate_data(board_coordinates, detector_poses, remove_markers_out_of_frame marker_corners.append(pixel) if len(marker_corners) == 4: - marker_snapshot = MarkerSnapshot(label=str(marker), corner_image_points=[]) - marker_snapshot.label = str(marker) - marker_corner_image_point_list = [] - for marker_corner in marker_corners: - marker_corner_image_point = MarkerCornerImagePoint(x_px=marker_corner[0], y_px=marker_corner[1]) - marker_corner_image_point_list.append(marker_corner_image_point) - marker_snapshot_list.append(MarkerSnapshot(label=str(marker), corner_image_points=marker_corner_image_point_list)) + for corner_index, marker_corner in enumerate(marker_corners): + corner_label: str = f"{str(marker)}_{corner_index}" + marker_snapshot_list.append(Annotation( + 
feature_label=corner_label, + x_px=marker_corner[0], + y_px=marker_corner[1])) collection_data[pose.target_id] = marker_snapshot_list diff --git a/src/board_builder/test/accuracy/utils/graph_renderer.py b/src/board_builder/test/accuracy/utils/graph_renderer.py index 6c3347a..07d9b2c 100644 --- a/src/board_builder/test/accuracy/utils/graph_renderer.py +++ b/src/board_builder/test/accuracy/utils/graph_renderer.py @@ -80,7 +80,7 @@ def plot_quadrilateral(ax, corners, marker_id, color): ax = ax_green if detector == 'camera 1' else ax_blue for marker_snapshot in marker_snapshots: corners = np.array([(corner.x_px, corner.y_px) for corner in marker_snapshot.corner_image_points]) - plot_quadrilateral(ax, corners, f'M{marker_snapshot.label}S{i}', color) + plot_quadrilateral(ax, corners, f'M{marker_snapshot.feature_label}S{i}', color) # Set limits and aspect for ax in [ax_green, ax_blue]: diff --git a/src/board_builder/test/graph_search_test.py b/src/board_builder/test/graph_search_test.py index 233f563..c9d6fba 100644 --- a/src/board_builder/test/graph_search_test.py +++ b/src/board_builder/test/graph_search_test.py @@ -1,7 +1,8 @@ -from src.board_builder.structures import PoseLocation import datetime -from src.common.structures import Matrix4x4 -from src.board_builder.utils.graph_search import create_graph, bfs_shortest_path, get_transform_from_root +from src.common import Matrix4x4 +from src.board_builder.structures import PoseLocation +from src.board_builder.graph_search import create_graph, bfs_shortest_path, get_transform_from_root + """ Description: Test case for the BFS/graph_search algorithm @@ -50,7 +51,7 @@ ]).as_numpy_array() -timestamp = str(datetime.datetime.utcnow()) +timestamp = datetime.datetime.now(tz=datetime.timezone.utc) poseLocation_01 = PoseLocation("01") poseLocation_01.frame_count += 60 diff --git a/src/board_builder/test/repeatability/repeatability_test.py b/src/board_builder/test/repeatability/repeatability_test.py index c3c692c..7c674f4 100644 --- 
a/src/board_builder/test/repeatability/repeatability_test.py +++ b/src/board_builder/test/repeatability/repeatability_test.py @@ -1,7 +1,8 @@ +from src.common import MathUtils import json import os import numpy as np -from src.common.util import register_corresponding_points + def transform_point(point, matrix): """Applies a 4x4 transformation matrix to a 3D point.""" @@ -39,7 +40,8 @@ def register_all_runs(data): # Check if the lengths of the point sets differ if len(current_corners) == len(reference_corners): # Register the entire set of points in the current run to the reference run - transformation_matrix = register_corresponding_points(current_corners.tolist(), reference_corners.tolist()) + transformation_matrix = MathUtils.register_corresponding_points( + current_corners.tolist(), reference_corners.tolist()) # Apply the transformation to the current run registered_run = {} diff --git a/src/board_builder/utils/__init__.py b/src/board_builder/utils/__init__.py deleted file mode 100644 index ea42dcb..0000000 --- a/src/board_builder/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from src.board_builder.utils.board_builder_pose_solver import BoardBuilderPoseSolver diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py deleted file mode 100644 index 7fd64ad..0000000 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ /dev/null @@ -1,779 +0,0 @@ -from src.board_builder.structures import \ - MarkerRaySet, \ - PoseData, \ - PoseLocation -from src.common.structures import \ - CharucoBoardSpecification, \ - IntrinsicParameters, \ - MarkerCorners, \ - Matrix4x4, \ - Pose, \ - TargetBase, \ - TargetMarker -from src.common.util import MathUtils -from src.common.util import register_corresponding_points -from src.pose_solver.structures import \ - Ray, \ - PoseSolverParameters -from src.pose_solver.util import \ - average_quaternion, \ - convex_quadrilateral_area, \ - 
closest_intersection_between_n_lines, \ - IterativeClosestPointParameters, \ - iterative_closest_point_for_points_and_rays -import cv2 -import cv2.aruco -import datetime -import numpy -from scipy.spatial.transform import Rotation -from typing import Callable, TypeVar -import uuid - - -KeyType = TypeVar("KeyType") -ValueType = TypeVar("ValueType") - - -class ImagePointSetsKey: - detector_label: str - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.timestamp = timestamp - - def _key(self): - return self.detector_label, self.timestamp - - def __eq__(self, other): - if isinstance(other, ImagePointSetsKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - -class MarkerKey: - detector_label: str - marker_id: str - - def __init__( - self, - detector_label: str, - marker_id: str - ): - self.detector_label = detector_label - self.marker_id = marker_id - - def _key(self): - return self.detector_label, self.marker_id - - def __eq__(self, other): - if isinstance(other, MarkerKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - def __str__(self): - return str("(" + self.detector_label + "," + str(self.marker_id) + ")") - - -class CornerSetReference: - marker_id: str - corners: list[list[float]] # in reference coordinate system - ray_sets: list[MarkerRaySet] - - def __init__( - self, - marker_id: str, - corners: list[list[float]], - ray_sets: list[MarkerRaySet] - ): - self.marker_id = marker_id - self.corners = corners - self.ray_sets = ray_sets - - -class TargetDepthKey: - target_id: uuid.UUID - detector_label: str - - def __init__( - self, - target_id: uuid.UUID, - detector_label: str - ): - self.target_id = target_id - self.detector_label = detector_label - - def _key(self): - return self.target_id, self.detector_label - - def __eq__(self, other): - 
if isinstance(other, TargetDepthKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - -class TargetDepth: - target_id: uuid.UUID - detector_label: str - estimate_timestamp: datetime.datetime - depth: float - - def __init__( - self, - target_id: uuid.UUID, - detector_label: str, - estimate_timestamp: datetime.datetime, - depth: float - ): - self.target_id = target_id - self.detector_label = detector_label - self.estimate_timestamp = estimate_timestamp - self.depth = depth - - @staticmethod - def age_seconds( - target_depth, - query_timestamp: datetime.datetime - ): - return (query_timestamp - target_depth.estimate_timestamp).total_seconds() - - -class BoardBuilderPoseSolver: - """ - Class containing the actual "solver" logic, kept separate from the API. - """ - - _intrinsics_by_detector_label: dict[str, IntrinsicParameters] - _targets: dict[uuid.UUID, TargetBase] - - _marker_corners_since_update: list[MarkerCorners] - - _marker_rayset_by_marker_key: dict[MarkerKey, MarkerRaySet] - - _alpha_poses_by_target_id: dict[uuid.UUID, list[PoseData]] - _target_extrapolation_poses_by_target_id: dict[uuid.UUID, list[PoseData]] - _poses_by_target_id: dict[uuid.UUID, PoseData] - _poses_by_detector_label: dict[str, Matrix4x4] - _target_depths_by_target_depth_key: dict[TargetDepthKey, list[TargetDepth]] - _poses_average_by_detector_label: dict[str, PoseLocation] - _detector_poses: list[Pose] - - _minimum_marker_age_before_removal_seconds: float - - _board_marker_ids: list[int] - _board_marker_positions: list[list[float]] - _board_marker_size: int - - def __init__(self): - - self._intrinsics_by_detector_label = dict() - self._parameters = PoseSolverParameters() - self._targets = dict() - self._marker_corners_since_update = list() - self._marker_rayset_by_marker_key = dict() - self._alpha_poses_by_target_id = dict() - self._target_extrapolation_poses_by_target_id = dict() - self._poses_by_target_id = dict() - 
self._poses_by_detector_label = dict() - self._target_depths_by_target_depth_key = dict() - self._poses_average_by_detector_label = dict() - self._detector_poses = list() - - self._minimum_marker_age_before_removal_seconds = max([ - self._parameters.POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) - - self._charuco_board = CharucoBoardSpecification() - self._board_marker_ids = self._charuco_board.get_marker_ids() - self._board_marker_positions = self._charuco_board.get_marker_center_points() - self._board_marker_size = 10 - - def add_marker_corners( - self, - detected_corners: list[MarkerCorners] - ) -> None: - self._marker_corners_since_update += detected_corners - - def add_target_marker( - self, - marker_id: int, - ) -> bool: - for target_id, target in self._targets.items(): - if isinstance(target, TargetMarker) and marker_id == target.marker_id: - return False - target: TargetBase = TargetMarker( - target_id=str(marker_id), - marker_id=str(marker_id), - marker_size=self._board_marker_size) - target_id: uuid.UUID = uuid.uuid4() - self._targets[target_id] = target - return True - - def get_detector_poses( - self - ) -> list[Pose]: - self._estimate_detector_pose_relative_to_reference() - detector_poses: list[Pose] = [ - Pose( - target_id=detector_label, - object_to_reference_matrix=pose, - solver_timestamp_utc_iso8601=str(datetime.datetime.utcnow().isoformat())) - for detector_label, pose in self._poses_by_detector_label.items()] - return detector_poses - - def get_target_poses( - self - ) -> list[Pose]: - self._estimate_target_pose_relative_to_reference() - target_poses: list[Pose] = [ - Pose( - target_id=str(target_id), - object_to_reference_matrix=pose.object_to_reference_matrix, - 
solver_timestamp_utc_iso8601=str(pose.newest_timestamp().isoformat())) - for target_id, pose in self._poses_by_target_id.items()] - return target_poses - - def set_intrinsic_parameters( - self, - detector_label: str, - intrinsic_parameters: IntrinsicParameters - ) -> None: - self._intrinsics_by_detector_label[detector_label] = intrinsic_parameters - - def set_detector_poses(self, detector_poses): - self._detector_poses = detector_poses - - def set_board_marker_size(self, board_marker_size): - self._board_marker_size = board_marker_size - - def _calculate_marker_ray_set( - self, - image_point_set: MarkerCorners, - detector_to_reference_matrix: Matrix4x4 - ) -> MarkerRaySet: - undistorted_points_original: numpy.ndarray = numpy.array(image_point_set.points, dtype="float32") - undistorted_points_original = numpy.reshape( - a=undistorted_points_original, - newshape=(1, len(image_point_set.points), 2)) - camera_matrix: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[image_point_set.detector_label].get_matrix()) - distortion_coefficients: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[image_point_set.detector_label].get_distortion_coefficients()) - undistorted_points_normalized = cv2.undistortPoints( - src=undistorted_points_original, - cameraMatrix=camera_matrix, - distCoeffs=distortion_coefficients) - ray_directions: list[list[float]] = list() - origin_point_detector = [0, 0, 0, 1] # origin - detector_to_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array() - ray_origin_reference = numpy.matmul(detector_to_reference, origin_point_detector) - ray_origin_reference = ray_origin_reference.tolist()[0:3] - for point_normalized in undistorted_points_normalized: - target_point_image = [point_normalized[0, 0], point_normalized[0, 1], 1, 1] # reverse perspective - target_point_detector = MathUtils.image_to_opengl_vector(target_point_image) - ray_direction_detector = numpy.subtract(target_point_detector, 
origin_point_detector) - ray_direction_detector = ray_direction_detector / numpy.linalg.norm(ray_direction_detector) # normalize - ray_direction_reference = numpy.matmul(detector_to_reference, ray_direction_detector) - ray_directions.append(list(ray_direction_reference[0:3])) - return MarkerRaySet( - marker_id=image_point_set.marker_id, - image_points=image_point_set.points, - image_timestamp=image_point_set.timestamp, - detector_label=image_point_set.detector_label, - detector_to_reference_matrix=detector_to_reference_matrix, - ray_origin_reference=ray_origin_reference, - ray_directions_reference=ray_directions) - - def _clear_old_values( - self, - query_timestamp: datetime.datetime - ) -> bool: # whether any dict's have changed or not - changed = False - self._marker_rayset_by_marker_key, modified = self._clear_old_values_from_dict( - input_dict=self._marker_rayset_by_marker_key, - age_from_value_function=MarkerRaySet.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._minimum_marker_age_before_removal_seconds) - changed |= modified - self._alpha_poses_by_target_id, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._alpha_poses_by_target_id, - age_from_value_function=PoseData.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS) - changed |= modified - self._target_extrapolation_poses_by_target_id, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._target_extrapolation_poses_by_target_id, - age_from_value_function=PoseData.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS) - changed |= modified - self._target_depths_by_target_depth_key, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._target_depths_by_target_depth_key, - age_from_value_function=TargetDepth.age_seconds, - 
query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS) - changed |= modified - return changed - - @staticmethod - def _clear_old_values_from_dict( - input_dict: dict[KeyType, ValueType], - age_from_value_function: Callable[[ValueType, datetime.datetime], float], - query_timestamp: datetime.datetime, - maximum_age_seconds: float - ) -> tuple[dict[KeyType, ValueType], bool]: # modified_dictionary, changes_found - changed: bool = False - output_dict: dict[KeyType, ValueType] = dict() - for input_key, input_value in input_dict.items(): - age_seconds: float = age_from_value_function(input_value, query_timestamp) - if age_seconds <= maximum_age_seconds: - output_dict[input_key] = input_value - else: - changed = True - return output_dict, changed - - @staticmethod - def _clear_old_values_from_dict_of_lists( - input_dict: dict[KeyType, list[ValueType]], - age_from_value_function: Callable[[ValueType, datetime.datetime], float], - query_timestamp: datetime.datetime, - maximum_age_seconds: float - ) -> tuple[dict[KeyType, list[ValueType]], bool]: # modified_dictionary, changes_found - changed: bool = False - output_dict: dict[KeyType, list[ValueType]] = dict() - for input_key in input_dict.keys(): - output_poses_for_label: list[ValueType] = list() - for pose in input_dict[input_key]: - age_seconds: float = age_from_value_function(pose, query_timestamp) - if age_seconds <= maximum_age_seconds: - output_poses_for_label.append(pose) - else: - changed = True - output_dict[input_key] = output_poses_for_label - return output_dict, changed - - def _corresponding_point_list_in_target( - self, - target_id: uuid.UUID - ) -> list[list[float]]: - points = list() - if target_id not in self._targets: - raise RuntimeError(f"Could not find target {str(target_id)} in domain.") - target: TargetBase = self._targets[target_id] - if isinstance(target, TargetMarker): - half_width = target.marker_size / 2.0 - points += [ - [-half_width, 
half_width, 0.0], - [half_width, half_width, 0.0], - [half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]] - return points - - def _estimate_target_pose_from_ray_set( - self, - target: TargetBase, - ray_set: MarkerRaySet - ) -> tuple[list[float], list[float]]: - corners = numpy.array([ray_set.image_points], dtype="float32") - detector_label: str = ray_set.detector_label - camera_matrix: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[detector_label].get_matrix(), dtype="float32") - distortion_coefficients: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[detector_label].get_distortion_coefficients(), dtype="float32") - rotation_vector: numpy.ndarray - translation_vector: numpy.ndarray - if isinstance(target, TargetMarker): - half_width = target.marker_size / 2.0 - reference_points: numpy.ndarray = numpy.array([ - [-half_width, half_width, 0.0], - [ half_width, half_width, 0.0], - [ half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]], - dtype="float32") - reference_points = numpy.reshape(reference_points, newshape=(1, 4, 3)) - corners = numpy.reshape(corners, newshape=(1, 4, 2)) - _, rotation_vector, translation_vector = cv2.solvePnP( - objectPoints=reference_points, - imagePoints=corners, - cameraMatrix=camera_matrix, - distCoeffs=distortion_coefficients) - else: - raise NotImplementedError("Only targets that are boards or markers are supported.") - - rotation_vector = rotation_vector.flatten() - translation_vector = translation_vector.flatten() - object_to_camera_matrix = numpy.identity(4, dtype="float32") - object_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix() - object_to_camera_matrix[0:3, 3] = translation_vector[0:3] - object_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(object_to_camera_matrix) - detector_to_reference_matrix: Matrix4x4 = ray_set.detector_to_reference_matrix - object_to_reference_matrix = numpy.matmul( - 
detector_to_reference_matrix.as_numpy_array(), object_to_detector_matrix) - position = list(object_to_reference_matrix[0:3, 3]) - quaternion = list(Rotation.from_matrix(object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) - return position, quaternion - - def _estimate_detector_pose_relative_to_reference(self): - image_point_sets_by_image_key, image_point_set_keys_with_reference_visible = self._update() - for image_point_sets_key, image_point_sets in image_point_sets_by_image_key.items(): - detector_label = image_point_sets_key.detector_label - board_image_point_sets = [ - image_point_set for image_point_set in image_point_sets - if image_point_set.marker_id in self._board_marker_ids - ] - - # Create a dictionary to map marker_id to its index in _board_marker_ids - marker_id_to_index = {marker_id: index for index, marker_id in enumerate(self._board_marker_ids)} - - # Sort the board_image_point_sets based on the order in _board_marker_ids - board_image_point_sets.sort(key=lambda x: marker_id_to_index[x.marker_id]) - - intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - - image_points = [] - detected_marker_positions = [] - for image_point_set in board_image_point_sets: - image_points += image_point_set.points - detected_marker_positions.append( - self._board_marker_positions[marker_id_to_index[image_point_set.marker_id]]) - - if len(detected_marker_positions) == 0: - continue # Skip if no markers are detected - - half_width: float = self._board_marker_size / 2.0 - reference_points = [] - for position in detected_marker_positions: - single_reference_points = numpy.array([ - [position[0] + half_width, position[1] - half_width, 0.0], - [position[0] + half_width, position[1] + half_width, 0.0], - [position[0] - half_width, position[1] + half_width, 0.0], - [position[0] - half_width, position[1] - half_width, 0.0], - ]) - reference_points.extend(single_reference_points) - - reference_points = numpy.array(reference_points, 
dtype="float32") - reference_points = numpy.reshape(reference_points, newshape=(1, len(reference_points), 3)) - image_points = numpy.reshape(numpy.array(image_points, dtype="float32"), newshape=(1, len(image_points), 2)) - - reference_found: bool - rotation_vector: numpy.ndarray - translation_vector: numpy.ndarray - reference_found, rotation_vector, translation_vector = cv2.solvePnP( - objectPoints=reference_points, - imagePoints=image_points, - cameraMatrix=numpy.asarray(intrinsics.get_matrix(), dtype="float32"), - distCoeffs=numpy.asarray(intrinsics.get_distortion_coefficients(), dtype="float32")) - if not reference_found: - continue # Camera does not see reference board - - rotation_vector = rotation_vector.flatten() - translation_vector = translation_vector.flatten() - reference_to_camera_matrix = numpy.identity(4, dtype="float32") - reference_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix() - reference_to_camera_matrix[0:3, 3] = translation_vector - reference_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(reference_to_camera_matrix) - detector_to_reference_opengl = numpy.linalg.inv(reference_to_detector_matrix) - self._poses_by_detector_label[detector_label] = Matrix4x4.from_numpy_array(detector_to_reference_opengl) - - def _estimate_target_pose_relative_to_reference(self): - image_point_sets_by_image_key, image_point_set_keys_with_reference_visible = self._update() - valid_image_point_sets: list[MarkerCorners] = list() - for image_point_sets_key in image_point_set_keys_with_reference_visible: - image_point_sets = image_point_sets_by_image_key[image_point_sets_key] - for image_point_set in image_point_sets: - valid_image_point_sets.append(image_point_set) - - # Calculate rays - # Only the most recent points per detector/marker pair are used, - # so if we process the most recent first, we can detect and - # discard the older point sets and avoid unnecessary processing - valid_image_point_sets.sort(key=lambda 
x: x.timestamp, reverse=True) - for image_point_set in valid_image_point_sets: - image_timestamp = image_point_set.timestamp - marker_key = MarkerKey( - detector_label=image_point_set.detector_label, - marker_id=str(image_point_set.marker_id)) - if marker_key in self._marker_rayset_by_marker_key: - if self._marker_rayset_by_marker_key[marker_key].image_timestamp > image_timestamp: - continue # A newer timestamp was found in this iteration. Skip the older one. - for pose in self._detector_poses: - if pose.target_id == image_point_set.detector_label: - detector_to_reference_matrix: Matrix4x4 = pose.object_to_reference_matrix - self._marker_rayset_by_marker_key[marker_key] = self._calculate_marker_ray_set( - image_point_set=image_point_set, - detector_to_reference_matrix=detector_to_reference_matrix) - - # Create a dictionary that maps marker ID's to a list of *recent* rays - ray_sets_by_marker_id: dict[str, list[MarkerRaySet]] = dict() - for marker_key, marker_ray_set in self._marker_rayset_by_marker_key.items(): - if (self._now_timestamp - marker_ray_set.image_timestamp).total_seconds() > \ - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: - continue - marker_id = marker_key.marker_id - if marker_id not in ray_sets_by_marker_id: - ray_sets_by_marker_id[marker_id] = list() - ray_sets_by_marker_id[marker_id].append(marker_ray_set) - - # Sort rays by the size of quadrilateral. - # Larger marker size in image suggests more precision. - # After a certain number of intersections, - # there may be little point in processing additional (lower precision) ray sets. 
- for marker_id, ray_set_list in ray_sets_by_marker_id.items(): - ray_set_list.sort(key=lambda x: convex_quadrilateral_area(x.image_points), reverse=True) - ray_sets_by_marker_id[marker_id] = ray_set_list[0:self._parameters.MAXIMUM_RAY_COUNT_FOR_INTERSECTION] - - marker_count_by_marker_id: dict[str, int] = dict() - for marker_id, ray_set_list in ray_sets_by_marker_id.items(): - marker_count_by_marker_id[marker_id] = len(ray_set_list) - intersectable_marker_ids: list[str] = list() - nonintersectable_marker_ids: list[str] = list() - for marker_id, count in marker_count_by_marker_id.items(): - if count >= 2: - intersectable_marker_ids.append(marker_id) - else: - nonintersectable_marker_ids.append(marker_id) - - # intersect rays to find the 3D points for each marker corner in reference coordinates - corner_sets_reference_by_marker_id: dict[str, CornerSetReference] = dict() - rejected_intersection_marker_ids: list[str] = list() - for marker_id in intersectable_marker_ids: - intersections_appear_valid: bool = True # If something looks off, set this to False - ray_set_list: list[MarkerRaySet] = ray_sets_by_marker_id[marker_id] - corner_points_in_reference: list[list[float]] = list() - for corner_index in range(0, 4): - rays: list[Ray] = list() - if len(ray_set_list) == 0: - intersections_appear_valid = False - print("Warning: intersectable_marker_ids corresponds to no ray set list") - break - - for ray_set in ray_set_list: - rays.append(Ray( - source_point=ray_set.ray_origin_reference, - direction=ray_set.ray_directions_reference[corner_index])) - intersection_result = closest_intersection_between_n_lines( - rays=rays, - maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) - if intersection_result.centroids.shape[0] == 0: - intersections_appear_valid = False - break - else: - corner_points_in_reference.append(intersection_result.centroid()) - if not intersections_appear_valid: - rejected_intersection_marker_ids.append(marker_id) - continue - 
corner_sets_reference_by_marker_id[marker_id] = CornerSetReference( - marker_id=marker_id, - corners=corner_points_in_reference, - ray_sets=ray_set_list) - - # We estimate the pose of each target based on the calculated intersections - # and the rays projected from each detector - for target_id, target in self._targets.items(): - marker_ids_in_target: list[str] - if isinstance(target, TargetMarker): - marker_ids_in_target = [target.marker_id] - else: - raise NotImplementedError("Only targets that are markers are supported.") - - marker_ids_with_intersections: list[str] = list() - marker_ids_with_rays: list[str] = list() - for marker_id in marker_ids_in_target: - if marker_id in corner_sets_reference_by_marker_id: - marker_ids_with_intersections.append(marker_id) - elif marker_id in ray_sets_by_marker_id: # Don't include if we have (presumably precise) intersections - marker_ids_with_rays.append(marker_id) - - if len(marker_ids_with_intersections) <= 0 and len(marker_ids_with_rays) <= 0: - continue # No information on which to base a pose - - # Determine how many markers and how many detectors are involved - marker_id_set: set[str] = set() - one_detector_only: bool = True - detector_set: set[str] = set() - ray_sets: list[MarkerRaySet] = list() - for marker_id in marker_ids_with_intersections: - marker_id_set.add(corner_sets_reference_by_marker_id[marker_id].marker_id) - ray_sets += corner_sets_reference_by_marker_id[marker_id].ray_sets - one_detector_only = False - for marker_id in marker_ids_with_rays: - marker_id_set.add(marker_id) - ray_sets += ray_sets_by_marker_id[marker_id] - assert (len(marker_id_set) > 0) - one_marker_only: bool = len(marker_id_set) == 1 - - for ray_set in ray_sets: - detector_set.add(ray_set.detector_label) - one_detector_only &= (len(detector_set) == 1) - - # Try to find a solution for this matrix - object_to_reference_matrix: numpy.array = numpy.identity(4, dtype="float32") - - if one_detector_only and one_marker_only: - marker_id = 
marker_ids_with_rays[0] - ray_set = ray_sets_by_marker_id[marker_id][0] - position, orientation = self._estimate_target_pose_from_ray_set( - target=target, - ray_set=ray_set) - object_to_reference_matrix[0:3, 3] = position - object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(orientation).as_matrix() - - else: - # Fill in the required variables for the customized iterative closest point - initial_object_to_reference_estimated: bool = False - initial_object_to_reference_matrix = numpy.identity(4, dtype="float32") - object_known_points: list[list[float]] = list() - reference_known_points: list[list[float]] = list() - object_ray_points: list[list[float]] = list() - reference_rays: list[Ray] = list() - iterative_closest_point_parameters = IterativeClosestPointParameters( - termination_iteration_count=self._parameters.icp_termination_iteration_count, - termination_delta_translation=self._parameters.icp_termination_translation, - termination_delta_rotation_radians=self._parameters.icp_termination_rotation_radians, - termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, - termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) - - if len(marker_ids_with_intersections) >= 1: - reference_points_for_intersections: list[list[float]] = list() - for marker_id in marker_ids_with_intersections: - corner_set_reference = corner_sets_reference_by_marker_id[marker_id] - reference_points_for_intersections += corner_set_reference.corners - object_points_for_intersections = self._corresponding_point_list_in_target(target_id=target_id) - object_known_points += object_points_for_intersections - reference_known_points += reference_points_for_intersections - initial_object_to_reference_matrix = register_corresponding_points( - point_set_from=object_points_for_intersections, - point_set_to=reference_points_for_intersections) - initial_object_to_reference_estimated = True - - # pose estimation based on ArUco directly, used 
*only* for initial pose estimation - estimated_positions: list[list[float]] = list() - estimated_orientations: list[list[float]] = list() # quaternions - for marker_id in marker_ids_with_rays: - ray_set_list = ray_sets_by_marker_id[marker_id] - for ray_set in ray_set_list: - assert (len(ray_set.ray_directions_reference) == 4) - reference_rays_for_set: list[Ray] = list() - for corner_index in range(0, 4): - reference_rays_for_set.append(Ray( - source_point=ray_set.ray_origin_reference, - direction=ray_set.ray_directions_reference[corner_index])) - reference_rays += reference_rays_for_set - object_points_for_set = self._corresponding_point_list_in_target(target_id=target_id) - object_ray_points += object_points_for_set - if not initial_object_to_reference_estimated: - position, orientation = self._estimate_target_pose_from_ray_set(target, ray_set) - estimated_positions.append(position) - estimated_orientations.append(orientation) - if not initial_object_to_reference_estimated: - mean_position = numpy.array([0.0, 0.0, 0.0]) - for position in estimated_positions: - mean_position += position - mean_position /= len(estimated_positions) - initial_object_to_reference_matrix[0:3, 3] = mean_position - mean_orientation = average_quaternion(estimated_orientations) - initial_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(mean_orientation).as_matrix() - - icp_output = iterative_closest_point_for_points_and_rays( - source_known_points=object_known_points, - target_known_points=reference_known_points, - source_ray_points=object_ray_points, - target_rays=reference_rays, - initial_transformation_matrix=initial_object_to_reference_matrix, - parameters=iterative_closest_point_parameters) - object_to_reference_matrix = icp_output.source_to_target_matrix - - # Compute a depth from each detector, - # find newest ray_set for each detector - newest_ray_set_by_detector_label: dict[str, MarkerRaySet] = dict() - for ray_set in ray_sets: - detector_label = ray_set.detector_label - 
if detector_label not in newest_ray_set_by_detector_label: - newest_ray_set_by_detector_label[detector_label] = ray_set - elif ray_set.image_timestamp > newest_ray_set_by_detector_label[detector_label].image_timestamp: - newest_ray_set_by_detector_label[detector_label] = ray_set - # Record depth - for detector_label in newest_ray_set_by_detector_label: - newest_ray_set = newest_ray_set_by_detector_label[detector_label] - target_depth_key = TargetDepthKey(target_id=target_id, detector_label=detector_label) - if target_depth_key not in self._target_depths_by_target_depth_key: - self._target_depths_by_target_depth_key[target_depth_key] = list() - detector_to_reference_matrix: Matrix4x4 = newest_ray_set.detector_to_reference_matrix - detector_position_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array()[0:3, 3] - object_position_reference: numpy.array = object_to_reference_matrix[0:3, 3] - depth = numpy.linalg.norm(object_position_reference - detector_position_reference) - target_depth = TargetDepth( - target_id=target_id, - detector_label=detector_label, - estimate_timestamp=newest_ray_set.image_timestamp, - depth=depth) - self._target_depths_by_target_depth_key[target_depth_key].append(target_depth) - # If only visible to one camera, use the depth to denoise - if one_detector_only: - detector_label = detector_set.pop() - detector_to_reference_matrix: Matrix4x4 = \ - newest_ray_set_by_detector_label[detector_label].detector_to_reference_matrix - detector_position_reference = detector_to_reference_matrix.as_numpy_array()[0:3, 3] - target_position_reference = object_to_reference_matrix[0:3, 3] - depth_vector_reference = target_position_reference - detector_position_reference - old_depth = numpy.linalg.norm(depth_vector_reference) - target_depth_key = TargetDepthKey(target_id=target_id, detector_label=detector_label) - new_depth = float(numpy.average( - [target_depth.depth for target_depth in - self._target_depths_by_target_depth_key[ - 
target_depth_key]])) + self._parameters.POSE_SINGLE_CAMERA_DEPTH_CORRECTION - depth_factor = new_depth / old_depth - object_to_reference_matrix[0:3, 3] = detector_position_reference + depth_factor * depth_vector_reference - - pose = PoseData( - target_id=str(target_id), - object_to_reference_matrix=Matrix4x4.from_numpy_array(object_to_reference_matrix), - ray_sets=ray_sets) - - if target_id not in self._target_extrapolation_poses_by_target_id: - self._target_extrapolation_poses_by_target_id[target_id] = list() - self._target_extrapolation_poses_by_target_id[target_id].append(pose) - - self._poses_by_target_id[target_id] = pose - - def _update(self): - now_timestamp = datetime.datetime.utcnow() - self._now_timestamp = now_timestamp - poses_need_update: bool = self._clear_old_values(now_timestamp) - poses_need_update |= len(self._marker_corners_since_update) > 0 - if not poses_need_update: - return - - self._poses_by_target_id.clear() - - image_point_sets_by_image_key: dict[ImagePointSetsKey, list[MarkerCorners]] = dict() - for marker_corners in self._marker_corners_since_update: - detector_label = marker_corners.detector_label - image_point_sets_key = ImagePointSetsKey(detector_label, marker_corners.timestamp) - if image_point_sets_key not in image_point_sets_by_image_key: - image_point_sets_by_image_key[image_point_sets_key] = list() - image_point_sets_by_image_key[image_point_sets_key].append(marker_corners) - self._marker_corners_since_update.clear() - - image_point_set_keys_with_reference_visible: list[ImagePointSetsKey] = list() - for image_point_sets_key, image_point_sets in image_point_sets_by_image_key.items(): - image_point_set_keys_with_reference_visible.append(image_point_sets_key) - - return image_point_sets_by_image_key, image_point_set_keys_with_reference_visible - diff --git a/src/common/__init__.py b/src/common/__init__.py index c660b37..686dbea 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -1,3 +1,6 @@ +from .annotator import \ 
+ Annotator, \ + MCTAnnotatorRuntimeError from .api import \ DequeueStatusMessagesRequest, \ DequeueStatusMessagesResponse, \ @@ -11,10 +14,57 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from .client_identifier_from_connection import client_identifier_from_connection -from .get_kwarg import get_kwarg -from .image_coding import ImageCoding -from .image_utils import ImageUtils -from .mct_component import MCTComponent -from .standard_resolutions import StandardResolutions -from .status_message_source import StatusMessageSource +from .calibration import \ + CalibrationErrorReason, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ + ExtrinsicCalibrator, \ + IntrinsicCalibration, \ + IntrinsicCalibrator, \ + MCTCalibrationError +from .camera import \ + Camera, \ + MCTCameraRuntimeError +from .image_processing import \ + Annotation, \ + ImageFormat, \ + ImageResolution, \ + ImageUtils +from .math import \ + FeatureRay, \ + IntrinsicParameters, \ + IterativeClosestPointParameters, \ + Landmark, \ + MathUtils, \ + Matrix4x4, \ + Pose, \ + Ray, \ + Target +from .mct_component import \ + DetectorFrame, \ + MCTComponent, \ + MixerFrame +from .pose_solver import \ + PoseSolver, \ + PoseSolverException +from .serialization import \ + IOUtils, \ + KeyValueSimpleAbstract, \ + KeyValueSimpleAny, \ + KeyValueSimpleBool, \ + KeyValueSimpleString, \ + KeyValueSimpleFloat, \ + KeyValueSimpleInt, \ + KeyValueMetaAbstract, \ + KeyValueMetaAny, \ + KeyValueMetaBool, \ + KeyValueMetaEnum, \ + KeyValueMetaFloat, \ + KeyValueMetaInt, \ + MCTSerializationError, \ + MCTDeserializable +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessage, \ + StatusMessageSource diff --git a/src/detector/interfaces/abstract_marker.py b/src/common/annotator.py similarity index 54% rename from src/detector/interfaces/abstract_marker.py rename to src/common/annotator.py index 879de65..54ddfbd 100644 --- a/src/detector/interfaces/abstract_marker.py +++ 
b/src/common/annotator.py @@ -1,29 +1,51 @@ -from ..structures import \ - MarkerConfiguration, \ - MarkerStatus -from src.common import StatusMessageSource -from src.common.structures import \ - MarkerSnapshot, \ - SeverityLabel, \ +from .image_processing import \ + Annotation +from .serialization import \ KeyValueMetaAny, \ KeyValueSimpleAny +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessageSource import abc import datetime +from enum import StrEnum import numpy +from pydantic import BaseModel + + +class _Configuration(BaseModel): pass + +class _Status(StrEnum): + STOPPED = "STOPPED" + RUNNING = "RUNNING" + FAILURE = "FAILURE" -class AbstractMarker(abc.ABC): + +class MCTAnnotatorRuntimeError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class Annotator(abc.ABC): """ - Functions may raise MCTDetectorRuntimeError + Functions may raise MCTAnnotatorRuntimeError """ - _configuration: MarkerConfiguration - _status: MarkerStatus + Configuration: type[_Configuration] = _Configuration + Status: type[_Status] = _Status + + _configuration: Configuration + _status: Status _status_message_source: StatusMessageSource def __init__( self, - configuration: MarkerConfiguration, + configuration: Configuration, status_message_source: StatusMessageSource ): self._configuration = configuration @@ -36,20 +58,20 @@ def add_status_message( ) -> None: self._status_message_source.enqueue_status_message(severity=severity, message=message) - def get_status(self) -> MarkerStatus: + def get_status(self) -> Status: return self._status - def set_status(self, status: MarkerStatus) -> None: + def set_status(self, status: Status) -> None: self._status = status @abc.abstractmethod def get_changed_timestamp(self) -> datetime.datetime: ... @abc.abstractmethod - def get_markers_detected(self) -> list[MarkerSnapshot]: ... + def get_markers_detected(self) -> list[Annotation]: ... 
@abc.abstractmethod - def get_markers_rejected(self) -> list[MarkerSnapshot]: ... + def get_markers_rejected(self) -> list[Annotation]: ... @abc.abstractmethod def get_parameters(self) -> list[KeyValueMetaAny]: ... diff --git a/src/common/api.py b/src/common/api.py new file mode 100644 index 0000000..720270a --- /dev/null +++ b/src/common/api.py @@ -0,0 +1,102 @@ +from .serialization import MCTDeserializable +from .status import StatusMessage +import abc +from pydantic import BaseModel, Field, SerializeAsAny + + +class MCTRequest(BaseModel, MCTDeserializable, abc.ABC): + parsable_type: str + + +class MCTRequestSeries(BaseModel): + series: list[SerializeAsAny[MCTRequest]] = Field() + + +class MCTResponse(BaseModel, MCTDeserializable, abc.ABC): + parsable_type: str + + +class MCTResponseSeries(BaseModel): + series: list[SerializeAsAny[MCTResponse]] = Field(default=list()) + responder: str = Field(default=str()) + + +class EmptyResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "empty" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class ErrorResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "error" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + message: str = Field() + + +class DequeueStatusMessagesRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "dequeue_status_messages" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class DequeueStatusMessagesResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "dequeue_status_messages" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + status_messages: list[StatusMessage] = Field() + + +class TimeSyncStartRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "time_sync_start_request" + + # noinspection PyTypeHints + 
parsable_type: str = Field(default=type_identifier()) + + +class TimeSyncStopRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "time_sync_stop_request" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class TimestampGetRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "timestamp_get_request" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + requester_timestamp_utc_iso8601: str = Field() + + +class TimestampGetResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "timestamp_get_response" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + requester_timestamp_utc_iso8601: str = Field() + responder_timestamp_utc_iso8601: str = Field() diff --git a/src/common/api/__init__.py b/src/common/api/__init__.py deleted file mode 100644 index 5805c52..0000000 --- a/src/common/api/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .dequeue_status_messages_request import DequeueStatusMessagesRequest -from .dequeue_status_messages_response import DequeueStatusMessagesResponse -from .empty_response import EmptyResponse -from .error_response import ErrorResponse -from .mct_request import MCTRequest -from .mct_request_series import MCTRequestSeries -from .mct_response import MCTResponse -from .mct_response_series import MCTResponseSeries -from .timestamp_get_request import TimestampGetRequest -from .timestamp_get_response import TimestampGetResponse -from .time_sync_start_request import TimeSyncStartRequest -from .time_sync_stop_request import TimeSyncStopRequest diff --git a/src/common/api/dequeue_status_messages_request.py b/src/common/api/dequeue_status_messages_request.py deleted file mode 100644 index d3eb040..0000000 --- a/src/common/api/dequeue_status_messages_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing 
import Final, Literal - - -class DequeueStatusMessagesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - - @staticmethod - def parsable_type_identifier() -> str: - return DequeueStatusMessagesRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/dequeue_status_messages_response.py b/src/common/api/dequeue_status_messages_response.py deleted file mode 100644 index d6403f9..0000000 --- a/src/common/api/dequeue_status_messages_response.py +++ /dev/null @@ -1,17 +0,0 @@ -from .mct_response import MCTResponse -from src.common.structures.status_message import StatusMessage -from pydantic import Field -from typing import Final, Literal - - -class DequeueStatusMessagesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - - @staticmethod - def parsable_type_identifier() -> str: - return DequeueStatusMessagesResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - status_messages: list[StatusMessage] = Field() diff --git a/src/common/api/empty_response.py b/src/common/api/empty_response.py deleted file mode 100644 index 242149d..0000000 --- a/src/common/api/empty_response.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import Field -from typing import Final, Literal - - -class EmptyResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "empty" - - @staticmethod - def parsable_type_identifier() -> str: - return EmptyResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/error_response.py b/src/common/api/error_response.py deleted file mode 100644 index bf910fd..0000000 --- a/src/common/api/error_response.py +++ /dev/null @@ -1,16 +0,0 @@ -from .mct_response import MCTResponse -from 
pydantic import Field -from typing import Final, Literal - - -class ErrorResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "error" - - @staticmethod - def parsable_type_identifier() -> str: - return ErrorResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - message: str = Field() diff --git a/src/common/api/mct_request.py b/src/common/api/mct_request.py deleted file mode 100644 index 359ec9d..0000000 --- a/src/common/api/mct_request.py +++ /dev/null @@ -1,7 +0,0 @@ -from ..structures.mct_parsable import MCTParsable -import abc -from pydantic import BaseModel - - -class MCTRequest(BaseModel, MCTParsable, abc.ABC): - parsable_type: str diff --git a/src/common/api/mct_request_series.py b/src/common/api/mct_request_series.py deleted file mode 100644 index 877054f..0000000 --- a/src/common/api/mct_request_series.py +++ /dev/null @@ -1,6 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import BaseModel, Field, SerializeAsAny - - -class MCTRequestSeries(BaseModel): - series: list[SerializeAsAny[MCTRequest]] = Field() diff --git a/src/common/api/mct_response.py b/src/common/api/mct_response.py deleted file mode 100644 index 9d8ba01..0000000 --- a/src/common/api/mct_response.py +++ /dev/null @@ -1,7 +0,0 @@ -from ..structures.mct_parsable import MCTParsable -import abc -from pydantic import BaseModel - - -class MCTResponse(BaseModel, MCTParsable, abc.ABC): - parsable_type: str diff --git a/src/common/api/mct_response_series.py b/src/common/api/mct_response_series.py deleted file mode 100644 index fd70fc0..0000000 --- a/src/common/api/mct_response_series.py +++ /dev/null @@ -1,7 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import BaseModel, Field, SerializeAsAny - - -class MCTResponseSeries(BaseModel): - series: list[SerializeAsAny[MCTResponse]] = Field(default=list()) - responder: str = Field(default=str()) diff --git 
a/src/common/api/time_sync_start_request.py b/src/common/api/time_sync_start_request.py deleted file mode 100644 index db45559..0000000 --- a/src/common/api/time_sync_start_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimeSyncStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_start_request" - - @staticmethod - def parsable_type_identifier() -> str: - return TimeSyncStartRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/time_sync_stop_request.py b/src/common/api/time_sync_stop_request.py deleted file mode 100644 index 4942f89..0000000 --- a/src/common/api/time_sync_stop_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimeSyncStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_stop_request" - - @staticmethod - def parsable_type_identifier() -> str: - return TimeSyncStopRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/timestamp_get_request.py b/src/common/api/timestamp_get_request.py deleted file mode 100644 index a3f9708..0000000 --- a/src/common/api/timestamp_get_request.py +++ /dev/null @@ -1,16 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimestampGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_request" - - @staticmethod - def parsable_type_identifier() -> str: - return TimestampGetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - requester_timestamp_utc_iso8601: str = Field() diff --git 
a/src/common/api/timestamp_get_response.py b/src/common/api/timestamp_get_response.py deleted file mode 100644 index 2699e81..0000000 --- a/src/common/api/timestamp_get_response.py +++ /dev/null @@ -1,17 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import Field -from typing import Final, Literal - - -class TimestampGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_response" - - @staticmethod - def parsable_type_identifier() -> str: - return TimestampGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - requester_timestamp_utc_iso8601: str = Field() - responder_timestamp_utc_iso8601: str = Field() diff --git a/src/common/calibration.py b/src/common/calibration.py new file mode 100644 index 0000000..a1ede2b --- /dev/null +++ b/src/common/calibration.py @@ -0,0 +1,813 @@ +from .image_processing import \ + ImageFormat, \ + ImageResolution, \ + ImageUtils +from .math import \ + IntrinsicParameters, \ + Matrix4x4 +from .serialization import \ + IOUtils +from .status import \ + MCTError +import abc +import datetime +from enum import StrEnum +import logging +import numpy +import os +from pydantic import BaseModel, Field, ValidationError +from typing import Final, Optional +import uuid + + +logger = logging.getLogger(__name__) + + +class CalibrationErrorReason(StrEnum): + INITIALIZATION: Final[str] = "initialization" + INVALID_INPUT: Final[str] = "invalid_input" + INVALID_STATE: Final[str] = "invalid_state" + DATA_NOT_FOUND: Final[str] = "data_not_found" + COMPUTATION_FAILURE: Final[str] = "computation_failure" + + +_PUBLIC_MESSAGE_KEY: Final[str] = "public_message" +_PRIVATE_MESSAGE_KEY: Final[str] = "private_message" + + +class MCTCalibrationError(MCTError): + public_message: str | None + private_message: str + reason: CalibrationErrorReason + + def __init__( + self, + reason: CalibrationErrorReason, + public_message: str | None = None, + 
private_message: str | None = None, + *args + ): + super().__init__(args) + self.reason = reason + self.public_message = public_message + self.private_message = private_message + if self.private_message is None and self.public_message is not None: + self.private_message = self.private_message + + +_RESULT_FORMAT: Final[str] = ".json" + + +# ===================================================================================================================== +# Internal structures applicable to both intrinsic and extrinsic calibrations +# ===================================================================================================================== + + +class _Configuration(BaseModel): + data_path: str = Field() + + +class _ImageState(StrEnum): + IGNORE = "ignore" + SELECT = "select" + DELETE = "delete" # stage for deletion + + +class _ImageMetadata(BaseModel): + identifier: str = Field() + filepath: str = Field() + detector_label: str = Field() + resolution: ImageResolution = Field() + label: str = Field(default_factory=str) # human-readable label + timestamp_utc_iso8601: str = Field( + default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + state: _ImageState = Field(default=_ImageState.SELECT) + + def is_selected(self): + return self.state == _ImageState.SELECT + + +class _ResultState(StrEnum): + ACTIVE = "active" # will be stored AND marked for use. Only one result expected to be active per image resolution. 
+ RETAIN = "retain" # store, but do not use + DELETE = "delete" # stage for deletion + + +class _ResultMetadata(BaseModel): + identifier: str = Field() + filepath: str = Field() + resolution: ImageResolution | None = Field(default=None) # Used in intrinsic, not currently used in extrinsic + label: str = Field(default_factory=str) + timestamp_utc_iso8601: str = Field( + default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + image_identifiers: list[str] = Field(default_factory=list) + state: _ResultState = Field(default=_ResultState.RETAIN) + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class _DataLedger(BaseModel): + image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) + result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) + + +class AbstractCalibrator(abc.ABC): + + _data_path: str + + _DATA_LEDGER_FILENAME: Final[str] = "data_ledger.json" + _data_ledger: _DataLedger + _data_ledger_filepath: str + + def __init__( + self, + configuration: _Configuration + ): + self._data_path = configuration.data_path + if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INITIALIZATION, + public_message="Data path does not exist and could not be created.", + private_message=f"{self._data_path} does not exist and could not be created.") + + self._data_ledger_filepath = os.path.join(self._data_path, AbstractCalibrator._DATA_LEDGER_FILENAME) + self._load_data_ledger() + + def _add_image( + self, + image: numpy.ndarray, + metadata: _ImageMetadata, + ) -> None: + """ + Helper for saving images consistently across different types of calibrators + Returns true if successful, False otherwise. 
+ """ + # Before making any changes to the data ledger, make sure folders exist + if not self._exists_on_filesystem(path=os.path.dirname(metadata.filepath), pathtype="path", create_path=True): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Failed to create storage location for input image.") + # Also make sure that this file does not somehow already exist (highly unlikely) + if os.path.exists(metadata.filepath): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Image appears to already exist. This is never expected to occur. " + "Please try again, and if this error continues to occur then please report a bug.", + private_message=f"Image {metadata.filepath} appears to already exist. This is never expected to occur.") + image_bytes: bytes + image_bytes = ImageUtils.image_to_bytes(image_data=image, image_format=ImageFormat.FORMAT_PNG) + try: + with (open(metadata.filepath, 'wb') as in_file): + in_file.write(image_bytes) + except IOError as e: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Failed to save image - see local log for more details.", + private_message=f"Failed to save image to {metadata.filepath}, reason: {str(e)}.") + self._data_ledger.image_metadata_list.append(metadata) + self._save_data_ledger() + + def _add_result( + self, + result: dict, + metadata: _ResultMetadata + ) -> None: + self._save_dict_to_filepath( + filepath=metadata.filepath, + json_dict=result, + ignore_none=True) + self._data_ledger.result_metadata_list.append(metadata) + self._save_data_ledger() + + @staticmethod + def _delete_file_if_exists(filepath: str): + try: + os.remove(filepath) + except FileNotFoundError: + pass + except OSError as e: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Failed to remove a file from the calibrator due to an unexpected reason. 
" + f"See local log for details.", + private_message=f"Failed to delete file {filepath} for reason: {str(e)}.") + + # noinspection DuplicatedCode + def delete_staged(self) -> None: + image_indices_to_delete: list = list() + image_metadata: _ImageMetadata + for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.state == _ImageState.DELETE: + self._delete_file_if_exists(image_metadata.filepath) + image_indices_to_delete.append(image_index) + for i in reversed(image_indices_to_delete): + del self._data_ledger.image_metadata_list[i] + result_indices_to_delete: list = list() + result_metadata: _ResultMetadata + for result_index, result_metadata in enumerate(self._data_ledger.result_metadata_list): + if result_metadata.state == _ResultState.DELETE: + self._delete_file_if_exists(result_metadata.filepath) + result_indices_to_delete.append(result_index) + for i in reversed(result_indices_to_delete): + del self._data_ledger.result_metadata_list[i] + self._save_data_ledger() + + @staticmethod + def _exists_on_filesystem( + path: str, + pathtype: IOUtils.PathType, + create_path: bool = False + ) -> bool: + errors: dict[str, str] = dict() + return_value: bool = IOUtils.exists( + path=path, + pathtype=pathtype, + create_path=create_path, + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg)) + if len(errors) > 0: + logger.error(errors[_PRIVATE_MESSAGE_KEY]) + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error determining if a file exists on the file system; See local log for details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + return return_value + + def get_image_by_identifier( + self, + identifier: str + ) -> str: + return self._load_image(identifier=identifier) + + # noinspection DuplicatedCode + def _get_result_metadata_by_identifier( + self, + identifier: str + ) -> 
_ResultMetadata: + match_count: int = 0 + matching_result_metadata: _ResultMetadata | None = None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.identifier == identifier: + match_count += 1 + matching_result_metadata = result_metadata + break + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"Identifier {identifier} is not associated with any result.") + elif match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Identifier {identifier} is associated with multiple results.") + return matching_result_metadata + + def list_image_metadata(self) -> list[_ImageMetadata]: + return list(self._data_ledger.image_metadata_list) + + def list_result_metadata(self) -> list[_ResultMetadata]: + return list(self._data_ledger.result_metadata_list) + + def _load_data_ledger(self) -> None: + """ + :return: True if loaded or if it can be created without overwriting existing data. False otherwise. + """ + json_dict: dict + json_dict = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) + try: + self._data_ledger = _DataLedger(**json_dict) + except ValidationError as e: + logger.error(e) + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error loading the data ledger; See local log for details.", + private_message=str(e)) + + @staticmethod + def _load_dict_from_filepath( + filepath: str + ) -> dict: + """ + :return: dict containing existing data (or empty if no data exists) + """ + if not os.path.exists(filepath): + return dict() # Not considered an error, just doesn't exist yet + elif not os.path.isfile(filepath): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="A file failed to load. See local log for details.", + private_message=f"JSON at {filepath} exists but is not a file. 
" + "Most likely a directory exists at that location, " + "and it needs to be manually removed.") + errors: dict[str, str] = dict() + json_dict: dict = IOUtils.hjson_read( + filepath=filepath, + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg)) + if not json_dict: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error loading data; See local log for details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + return json_dict + + # noinspection DuplicatedCode + def _load_image( + self, + identifier: str + ) -> str: # image in base64 + match_count: int = 0 + matching_metadata: _ImageMetadata | None = None + for image_metadata in self._data_ledger.image_metadata_list: + if image_metadata.identifier == identifier: + match_count += 1 + matching_metadata = image_metadata + break + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {identifier} is not associated with any image.") + elif match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {identifier} is associated with multiple images.") + + if not os.path.exists(matching_metadata.filepath): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"File does not exist for image {identifier}.") + image_bytes: bytes + try: + with (open(matching_metadata.filepath, 'rb') as in_file): + image_bytes = in_file.read() + except OSError: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Failed to open image {identifier}.") + image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) + return image_base64 + + def _load_result( + self, + identifier: str, + result_type: type[BaseModel] + ) -> ...: + metadata: _ResultMetadata = 
self._get_result_metadata_by_identifier(identifier=identifier) + return self._load_result_by_metadata(metadata=metadata, result_type=result_type) + + def _load_result_by_metadata( + self, + metadata: _ResultMetadata, + result_type: type[BaseModel] + ) -> ...: + """ + Read the calibration result corresponding to the provided metadata. + """ + json_dict: dict + load_success: bool + json_dict = self._load_dict_from_filepath(metadata.filepath) + result: result_type = result_type(**json_dict) + return result + + def _save_data_ledger(self) -> None: + return self._save_dict_to_filepath( + filepath=self._data_ledger_filepath, + json_dict=self._data_ledger.model_dump()) + + @staticmethod + def _save_dict_to_filepath( + filepath: str, + json_dict: dict, + ignore_none: bool = False + ) -> None: + """ + :param filepath: Where to write the file + :param json_dict: What to write to the file + :param ignore_none: See IOUtils.json_write + """ + errors: dict[str, str] = dict() + IOUtils.json_write( + filepath=filepath, + json_dict=json_dict, + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg), + ignore_none=ignore_none) + if len(errors) > 0: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Error saving data; See local log for more details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + + # noinspection DuplicatedCode + def update_image_metadata( + self, + image_identifier: str, + image_state: _ImageState, + image_label: str | None + ) -> None: + match_count: int = 0 + matched_metadata: _ImageMetadata | None = None + for metadata in self._data_ledger.image_metadata_list: + if metadata.identifier == image_identifier: + match_count += 1 + matched_metadata = metadata + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {image_identifier} is not associated 
with any image.") + elif match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {image_identifier} is associated with multiple images. " + "This suggests that the data ledger is in an inconsistent state. " + "It may be prudent to either manually correct it, or recreate it.") + matched_metadata.state = image_state + if image_label is not None: + matched_metadata.label = image_label + self._save_data_ledger() + + # noinspection DuplicatedCode + def update_result_metadata( + self, + result_identifier: str, + result_state: _ResultState, + result_label: str | None = None + ) -> None: + match_count: int = 0 + matched_metadata: _ResultMetadata | None = None + for metadata in self._data_ledger.result_metadata_list: + if metadata.identifier == result_identifier: + match_count += 1 + matched_metadata = metadata + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {result_identifier} is not associated with any result.") + elif match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {result_identifier} is associated with multiple results. " + "This suggests that the data ledger is in an inconsistent state. 
" + "Please manually correct it, or recreate it.") + matched_metadata.state = result_state + if result_label is not None: + matched_metadata.label = result_label + self._save_data_ledger() + + +# ===================================================================================================================== +# Intrinsic calibration +# ===================================================================================================================== + + +class IntrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + image_resolution: ImageResolution = Field() + calibrated_values: IntrinsicParameters = Field() + supplemental_data: dict = Field() + + +class IntrinsicCalibrator(AbstractCalibrator, abc.ABC): + Configuration: type[_Configuration] = _Configuration + ImageState: type[_ImageState] = _ImageState + ImageMetadata: type[_ImageMetadata] = _ImageMetadata + ResultState: type[_ResultState] = _ResultState + ResultMetadata: type[_ResultMetadata] = _ResultMetadata + + def __init__( + self, + configuration: Configuration, + ): + super().__init__(configuration=configuration) + + # noinspection DuplicatedCode + def add_image( + self, + image_base64: str, + detector_label: str = "", + timestamp_utc_iso8601: str | None = None + ) -> str: # id of image + if timestamp_utc_iso8601 is None: + timestamp_utc_iso8601 = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() + image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") + identifier: str = str(uuid.uuid4()) + resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) + filepath = os.path.join(self._data_path, str(resolution), identifier + ImageFormat.FORMAT_PNG) + metadata: _ImageMetadata = IntrinsicCalibrator.ImageMetadata( + identifier=identifier, + filepath=filepath, + detector_label=detector_label, + resolution=resolution, + timestamp_utc_iso8601=timestamp_utc_iso8601) + self._add_image( + image=image, + metadata=metadata) + 
return metadata.identifier + + def calculate( + self, + image_resolution: ImageResolution + ) -> tuple[str, IntrinsicCalibration]: + """ + :returns: a tuple containing a result identifier (GUID as string) and the IntrinsicCalibration structure + """ + + image_metadata_list: list[_ImageMetadata] = list() # image metadata available for calibration + for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.resolution != image_resolution: + continue + if image_metadata.state != _ImageState.SELECT: + continue + if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="An image failed to load. " + "suggesting that the data ledger is in an inconsistent state. " + "Please see the locaL log for details.", + private_message=f"Image {image_metadata.identifier} was not found. " + "This suggests that the data ledger is in an inconsistent state. 
" + "Please correct the data ledger.") + image_metadata_list.append(image_metadata) + + if len(image_metadata_list) == 0: + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message=f"No images found for resolution {str(image_resolution)}.") + + intrinsic_calibration, image_metadata_list = self._calculate_implementation( + image_resolution=image_resolution, + image_metadata_list=image_metadata_list) + + result_identifier: str = str(uuid.uuid4()) + result_filepath = \ + os.path.join(self._data_path, str(image_resolution), result_identifier + _RESULT_FORMAT) + result_metadata: IntrinsicCalibrator.ResultMetadata = IntrinsicCalibrator.ResultMetadata( + identifier=result_identifier, + filepath=result_filepath, + resolution=image_resolution, + image_identifiers=[image_metadata.identifier for image_metadata in image_metadata_list]) + self._add_result( + result=intrinsic_calibration.model_dump(), + metadata=result_metadata) + + # For now, assume that the user's intent is to set any new calibration to be the active one + self.update_result_metadata( + result_identifier=result_metadata.identifier, + result_state=_ResultState.ACTIVE) + + return result_identifier, intrinsic_calibration + + @abc.abstractmethod + def _calculate_implementation( + self, + image_resolution: ImageResolution, + image_metadata_list: list[ImageMetadata] + ) -> tuple[IntrinsicCalibration, list[ImageMetadata]]: # metadata of images that were actually used in calibration + pass + + def get_result( + self, + result_identifier: str + ) -> IntrinsicCalibration: + return self._load_result( + identifier=result_identifier, + result_type=IntrinsicCalibration) + + def get_result_active_by_image_resolution( + self, + image_resolution: ImageResolution, + ) -> Optional[...]: + match_count: int = 0 + matched_metadata: IntrinsicCalibrator.ResultMetadata | None = None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.state == _ResultState.ACTIVE 
and result_metadata.resolution == image_resolution: + matched_metadata = result_metadata + match_count += 1 + + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"No result metadata is active for resolution {str(image_resolution)}. " + "Please ensure one has been selected, then try again.") + if match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Multiple result metadata are active for resolution {str(image_resolution)}. " + "To recover from this ambiguous state, explicitly set " + "one of the results as \"active\", which will reset others to \"retain\".") + + return self._load_result_by_metadata( + metadata=matched_metadata, + result_type=IntrinsicCalibration) + + def list_resolutions(self) -> list[ImageResolution]: + resolutions_as_str: set[str] = {str(metadata.resolution) for metadata in self._data_ledger.image_metadata_list} + resolutions: list[ImageResolution] = [ImageResolution.from_str(resolution) for resolution in resolutions_as_str] + return resolutions + + def list_image_metadata_by_image_resolution( + self, + image_resolution: ImageResolution + ) -> list[ImageMetadata]: + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] = list() + for metadata in self._data_ledger.image_metadata_list: + if metadata.resolution == image_resolution: + image_metadata_list.append(metadata) + return image_metadata_list + + def list_result_metadata_by_image_resolution( + self, + image_resolution: ImageResolution + ) -> list[ResultMetadata]: + result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] = list() + for metadata in self._data_ledger.result_metadata_list: + if metadata.resolution == image_resolution: + result_metadata_list.append(metadata) + return result_metadata_list + + def update_result_metadata( + self, + result_identifier: str, + result_state: ResultState, + result_label: str | None = None + ) -> None: + 
super().update_result_metadata( + result_identifier=result_identifier, + result_state=result_state, + result_label=result_label) + + # Some cleanup as applicable + if result_state == _ResultState.ACTIVE: + matching_metadata: _ResultMetadata = self._get_result_metadata_by_identifier(identifier=result_identifier) + for metadata in self._data_ledger.result_metadata_list: + if metadata.resolution == matching_metadata.resolution and \ + metadata.identifier != matching_metadata.identifier: + metadata.state = _ResultState.RETAIN # Only one ACTIVE per resolution + self._save_data_ledger() + + +# ===================================================================================================================== +# Extrinsic calibration +# ===================================================================================================================== + + +class ExtrinsicCalibrationDetectorResult(BaseModel): + detector_label: str = Field() + detector_to_reference: Matrix4x4 = Field() + + +class ExtrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + calibrated_values: list[ExtrinsicCalibrationDetectorResult] = Field() + supplemental_data: dict = Field() + + +class ExtrinsicCalibrator(AbstractCalibrator, abc.ABC): + Configuration: type[_Configuration] = _Configuration + ImageState: type[_ImageState] = _ImageState + ImageMetadata: type[_ImageMetadata] = _ImageMetadata + ResultState: type[_ResultState] = _ResultState + ResultMetadata: type[_ResultMetadata] = _ResultMetadata + + detector_intrinsics_by_label: dict[str, IntrinsicParameters] + + def __init__( + self, + configuration: Configuration | dict + ): + if isinstance(configuration, dict): + configuration = ExtrinsicCalibrator.Configuration(**configuration) + self.detector_intrinsics_by_label = dict() + super().__init__(configuration=configuration) + + # noinspection DuplicatedCode + def add_image( + self, + image_base64: str, + detector_label: str, + timestamp_utc_iso8601: str + ) -> str: # id of image + 
image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") + identifier: str = str(uuid.uuid4()) + resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) + filepath = os.path.join(self._data_path, str(resolution), identifier + ImageFormat.FORMAT_PNG) + metadata: _ImageMetadata = ExtrinsicCalibrator.ImageMetadata( + identifier=identifier, + filepath=filepath, + detector_label=detector_label, + resolution=resolution, + timestamp_utc_iso8601=timestamp_utc_iso8601) + self._add_image( + image=image, + metadata=metadata) + return metadata.identifier + + def calculate( + self + ) -> tuple[str, ExtrinsicCalibration]: + """ + :returns: a tuple containing a result identifier (GUID as string) and the ExtrinsicCalibration structure + """ + + # if detector_labels != list(set(detector_labels)): + # raise MCTIntrinsicCalibrationError(message=f"Detector labels must not contain duplicated elements.") + # if len(detector_labels) != len(detector_intrinsics): + # raise MCTIntrinsicCalibrationError(message=f"Expected detector labels and intrinsics to be of same size.") + # detector_intrinsics_by_label: dict[str, IntrinsicParameters] = dict(zip(detector_labels, detector_intrinsics)) + + image_metadata_list: list[_ImageMetadata] = list() # image metadata available for calibration + for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.state != _ImageState.SELECT: + continue + if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="An image failed to load. " + "suggesting that the data ledger is in an inconsistent state. " + "Please see the locaL log for details.", + private_message=f"Image {image_metadata.identifier} was not found. " + "This suggests that the data ledger is in an inconsistent state. 
" + "Please correct the data ledger.") + image_metadata_list.append(image_metadata) + + # This is a check to make sure that there are no duplicates over any (timestamp, detector_label) + identifiers: list[tuple[str, str]] = [ + (metadata.timestamp_utc_iso8601, metadata.detector_label) + for metadata in image_metadata_list] + if len(identifiers) != len(set(identifiers)): + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Duplicate data were detected over (timestamp, detector_label).") + + if len(image_metadata_list) == 0: + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message=f"No images found for calibration.") + + extrinsic_calibration, image_metadata_list = self._calculate_implementation( + image_metadata_list=image_metadata_list) + + result_identifier: str = str(uuid.uuid4()) + result_filepath = os.path.join(self._data_path, result_identifier + _RESULT_FORMAT) + result_metadata: ExtrinsicCalibrator.ResultMetadata = ExtrinsicCalibrator.ResultMetadata( + identifier=result_identifier, + filepath=result_filepath, + image_identifiers=[image_metadata.identifier for image_metadata in image_metadata_list]) + self._add_result( + result=extrinsic_calibration.model_dump(), + metadata=result_metadata) + + # For now, assume that the user's intent is to set any new calibration to be the active one + self.update_result_metadata( + result_identifier=result_metadata.identifier, + result_state=_ResultState.ACTIVE) + + return result_identifier, extrinsic_calibration + + def intrinsic_parameters_update( + self, + detector_label: str, + intrinsic_parameters: IntrinsicParameters + ) -> None: + self.detector_intrinsics_by_label[detector_label] = intrinsic_parameters + + @abc.abstractmethod + def _calculate_implementation( + self, + image_metadata_list: list[ImageMetadata] + ): + pass + + def get_result( + self, + result_identifier: str + ) -> ExtrinsicCalibration: + return self._load_result( + 
identifier=result_identifier, + result_type=ExtrinsicCalibration) + + def get_result_active( + self + ) -> Optional[ExtrinsicCalibration]: + match_count: int = 0 + matched_metadata: IntrinsicCalibrator.ResultMetadata | None = None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.state == _ResultState.ACTIVE: + matched_metadata = result_metadata + match_count += 1 + + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"No result metadata is active. " + "Please ensure one has been selected, then try again.") + if match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Multiple result metadata are active. " + "To recover from this ambiguous state, explicitly set " + "one of the results as \"active\", which will reset others to \"retain\".") + + return self._load_result_by_metadata( + metadata=matched_metadata, + result_type=IntrinsicCalibration) diff --git a/src/detector/interfaces/abstract_camera.py b/src/common/camera.py similarity index 63% rename from src/detector/interfaces/abstract_camera.py rename to src/common/camera.py index 7df2fc0..d796301 100644 --- a/src/detector/interfaces/abstract_camera.py +++ b/src/common/camera.py @@ -1,32 +1,54 @@ -from ..structures import \ - CameraConfiguration, \ - CameraStatus -from src.common import StatusMessageSource -from src.common.structures import \ - CaptureFormat, \ - ImageResolution, \ +from .image_processing import \ + ImageFormat, \ + ImageResolution +from .serialization import \ KeyValueSimpleAny, \ - KeyValueMetaAbstract, \ - SeverityLabel + KeyValueMetaAbstract +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessageSource import abc import base64 import cv2 import datetime +from enum import StrEnum import numpy +from pydantic import BaseModel -class AbstractCamera(abc.ABC): +class _Configuration(BaseModel): pass + + +class _Status(StrEnum): + 
STOPPED = "STOPPED" + RUNNING = "RUNNING" + FAILURE = "FAILURE" + + +class MCTCameraRuntimeError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class Camera(abc.ABC): """ - Functions may raise MCTDetectorRuntimeError + Functions may raise MCTCameraRuntimeError """ - _configuration: CameraConfiguration - _status: CameraStatus + Status: type[_Status] = _Status + Configuration: type[_Configuration] = _Configuration + + _configuration: Configuration + _status: Status _status_message_source: StatusMessageSource def __init__( self, - configuration: CameraConfiguration, + configuration: Configuration, status_message_source: StatusMessageSource ): self._configuration = configuration @@ -44,10 +66,11 @@ def add_status_message( def get_encoded_image( self, - image_format: CaptureFormat, + image_format: ImageFormat, requested_resolution: ImageResolution | None # None means to not alter the image dimensions - ) -> str: + ) -> tuple[str, ImageResolution]: # ID, original_resolution image: numpy.ndarray = self.get_image() + original_resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) if requested_resolution is not None: image = cv2.resize(src=image, dsize=(requested_resolution.x_px, requested_resolution.y_px)) encoded_frame: bool @@ -56,12 +79,12 @@ def get_encoded_image( encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() # noinspection PyTypeChecker encoded_image_rgb_base64: str = base64.b64encode(encoded_image_rgb_bytes) - return encoded_image_rgb_base64 + return encoded_image_rgb_base64, original_resolution - def get_status(self) -> CameraStatus: + def get_status(self) -> Status: return self._status - def set_status(self, status: CameraStatus) -> None: + def set_status(self, status: Status) -> None: self._status = status @abc.abstractmethod diff --git a/src/common/client_identifier_from_connection.py 
b/src/common/client_identifier_from_connection.py deleted file mode 100644 index a6b05e6..0000000 --- a/src/common/client_identifier_from_connection.py +++ /dev/null @@ -1,7 +0,0 @@ -from fastapi import Request, WebSocket - - -def client_identifier_from_connection( - connection: Request | WebSocket -) -> str: - return f"{connection.client.host}:{connection.client.port}" diff --git a/src/common/exceptions/__init__.py b/src/common/exceptions/__init__.py deleted file mode 100644 index f5f7ff8..0000000 --- a/src/common/exceptions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .mct_error import MCTError -from .mct_parsing_error import MCTParsingError diff --git a/src/common/exceptions/mct_error.py b/src/common/exceptions/mct_error.py deleted file mode 100644 index 97abf2f..0000000 --- a/src/common/exceptions/mct_error.py +++ /dev/null @@ -1,3 +0,0 @@ -class MCTError(Exception): - def __init__(self, *args): - super().__init__(*args) diff --git a/src/common/exceptions/mct_parsing_error.py b/src/common/exceptions/mct_parsing_error.py deleted file mode 100644 index a550f2c..0000000 --- a/src/common/exceptions/mct_parsing_error.py +++ /dev/null @@ -1,13 +0,0 @@ -from .mct_error import MCTError - - -class MCTParsingError(MCTError): - message: str - - def __init__( - self, - message: str, - *args - ): - super().__init__(args) - self.message = message diff --git a/src/common/get_kwarg.py b/src/common/get_kwarg.py deleted file mode 100644 index 851e431..0000000 --- a/src/common/get_kwarg.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import TypeVar -T = TypeVar("T") - - -def get_kwarg( - kwargs: dict, - key: str, - arg_type: type[T], - required: bool = True -) -> T | None: - """ - :param kwargs: kwargs as a dict (without the "**") - :param key: key to search for - :param arg_type: expected type - :param required: If the keyword does not exist, then: - required == True -> Raise ValueError - required == False -> Return None - """ - - if key not in kwargs: - if required: - raise 
ValueError(f"Missing required key {key} in keyword arguments.") - return None - value: T = kwargs[key] - if not isinstance(value, arg_type): - raise ValueError( - f"Expected keyword argument {key} to be of type {arg_type.__name__}, " - f"but got {type(value).__name__}.") - return value diff --git a/src/common/image_coding.py b/src/common/image_coding.py deleted file mode 100644 index b9ed6d1..0000000 --- a/src/common/image_coding.py +++ /dev/null @@ -1,84 +0,0 @@ -from .structures import CaptureFormat -import base64 -import cv2 -import logging -import numpy -from typing import Literal - - -logger = logging.getLogger(__file__) - -ColorMode = Literal["color", "greyscale"] - - -class ImageCoding: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. - """ - - def __init__(self): - raise RuntimeError("ImageCoding is not meant to be instantiated.") - - @staticmethod - def base64_to_image( - input_base64: str, - color_mode: ColorMode = "color" - ) -> numpy.ndarray: - """ - Assumes 8 bits per component - """ - - image_bytes: bytes = base64.b64decode(s=input_base64) - - color_flag: int = 0 - if color_mode == "color": - color_flag |= cv2.IMREAD_COLOR - elif color_mode == "greyscale": - color_flag |= cv2.IMREAD_GRAYSCALE - else: - logger.warning(f"Unsupported color mode specified: {color_mode}") - - opencv_image: numpy.ndarray = cv2.imdecode( - buf=numpy.frombuffer( - buffer=image_bytes, - dtype=numpy.uint8), - flags=color_flag) - return opencv_image - - @staticmethod - def bytes_to_base64( - image_bytes: bytes - ) -> str: - return base64.b64encode(image_bytes).decode("ascii") - - @staticmethod - def image_to_base64( - image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", - ) -> str: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... 
- :return: base64 string representing the image - """ - encoded_image_rgb_bytes: bytes = ImageCoding.image_to_bytes( - image_data=image_data, - image_format=image_format) - encoded_image_rgb_base64: str = ImageCoding.bytes_to_base64(encoded_image_rgb_bytes) - return encoded_image_rgb_base64 - - @staticmethod - def image_to_bytes( - image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", - ) -> bytes: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... - :return: base64 string representing the image - """ - encoded_image_rgb_single_row: numpy.array - encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) - encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() - return encoded_image_rgb_bytes diff --git a/src/common/image_processing.py b/src/common/image_processing.py new file mode 100644 index 0000000..2d1fb46 --- /dev/null +++ b/src/common/image_processing.py @@ -0,0 +1,243 @@ +import base64 +import cv2 +from enum import StrEnum +import logging +import numpy +from pydantic import BaseModel, Field +from typing import ClassVar, Literal, Final + + +logger = logging.getLogger(__file__) + +ColorMode = Literal["color", "greyscale"] + + +class Annotation(BaseModel): + """ + A distinct point as detected on a detector image. + """ + + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + + UNIDENTIFIED_LABEL: ClassVar[str] = str() + + feature_label: str = Field() # Empty indicates that something was detected but not identified + x_px: float = Field() + y_px: float = Field() + + def base_feature_label(self) -> str: + """ + Part of the label before the RELATION_CHARACTER. 
+ """ + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] + + +class ImageFormat(StrEnum): + FORMAT_PNG = ".png" + FORMAT_JPG = ".jpg" + + +class ImageResolution(BaseModel): + x_px: int = Field() + y_px: int = Field() + + def __eq__(self, other) -> bool: + if type(self) is not type(other): + return False + return \ + self.x_px == other.x_px and \ + self.y_px == other.y_px + + def __hash__(self) -> int: + return hash(str(self)) + + def __lt__(self, other): + if not isinstance(other, ImageResolution): + raise ValueError() + if self.x_px < other.x_px: + return True + elif self.x_px > other.x_px: + return False + elif self.y_px < other.y_px: + return True + else: + return False + + def __str__(self): + return f"{self.x_px}x{self.y_px}" + + @staticmethod + def from_str(in_str: str) -> 'ImageResolution': + if 'x' not in in_str: + raise ValueError("in_str is expected to contain delimiter 'x'.") + parts: list[str] = in_str.split('x') + if len(parts) > 2: + raise ValueError("in_str is expected to contain exactly one 'x'.") + x_px = int(parts[0]) + y_px = int(parts[1]) + return ImageResolution(x_px=x_px, y_px=y_px) + + +class ImageUtils: + """ + A "class" to group related static functions, like in a namespace. + The class itself is not meant to be instantiated. 
+ """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + @staticmethod + def base64_to_image( + input_base64: str, + color_mode: ColorMode = "color" + ) -> numpy.ndarray: + """ + Assumes 8 bits per component + """ + + image_bytes: bytes = base64.b64decode(s=input_base64) + + color_flag: int = 0 + if color_mode == "color": + color_flag |= cv2.IMREAD_COLOR + elif color_mode == "greyscale": + color_flag |= cv2.IMREAD_GRAYSCALE + else: + logger.warning(f"Unsupported color mode specified: {color_mode}") + + opencv_image: numpy.ndarray = cv2.imdecode( + buf=numpy.frombuffer( + buffer=image_bytes, + dtype=numpy.uint8), + flags=color_flag) + return opencv_image + + @staticmethod + def black_image( + resolution_px: tuple[int, int], + ) -> numpy.ndarray: + return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) + + @staticmethod + def bytes_to_base64( + image_bytes: bytes + ) -> str: + return base64.b64encode(image_bytes).decode("ascii") + + @staticmethod + def image_resize_to_fit( + opencv_image: numpy.ndarray, + available_size: tuple[int, int] # x, y + ) -> numpy.ndarray: + # note: opencv height represented by 1st dimension + source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) + image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( + source_resolution_px=source_resolution_px, + available_size_px=available_size) + return cv2.resize( + src=opencv_image, + dsize=(image_width_px, image_height_px)) + + @staticmethod + def image_to_base64( + image_data: numpy.ndarray, + image_format: ImageFormat = ".png", + ) -> str: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... 
+ :return: base64 string representing the image + """ + encoded_image_rgb_bytes: bytes = ImageUtils.image_to_bytes( + image_data=image_data, + image_format=image_format) + encoded_image_rgb_base64: str = ImageUtils.bytes_to_base64(encoded_image_rgb_bytes) + return encoded_image_rgb_base64 + + @staticmethod + def image_to_bytes( + image_data: numpy.ndarray, + image_format: ImageFormat = ".png", + ) -> bytes: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... + :return: base64 string representing the image + """ + encoded_image_rgb_single_row: numpy.array + encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) + encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() + return encoded_image_rgb_bytes + + @staticmethod + def partition_rect( + available_size_px: tuple[int, int], # x, y + partition_count: int + ) -> tuple[tuple[int, int], list[tuple[int, int]]]: # ((width_px, height_px), [(x_px, y_px)]) + """ + Partition a rectangular area into a grid, and return the rectangle definitions corresponding to the cells. 
+ """ + width_cells: int = 1 + height_cells: int = 1 + cell_count: int = width_cells * height_cells + max_cell_count: int = 1000 # I don't think we'll ever get this high, but I want a theoretical iteration limit + while cell_count <= max_cell_count: + width_cells += 1 + cell_count = width_cells * height_cells + if cell_count >= partition_count: + break + height_cells += 1 + cell_count = width_cells * height_cells + if cell_count >= partition_count: + break + width_px: int = available_size_px[0] // width_cells + height_px: int = available_size_px[1] // height_cells + positions_px: list[tuple[int, int]] = list() + for cell_index in range(0, partition_count): + y_cell = cell_index // width_cells + x_cell = cell_index % width_cells + y_px = y_cell * height_px + x_px = x_cell * width_px + positions_px.append((x_px, y_px)) + return (width_px, height_px), positions_px + + @staticmethod + def scale_factor_for_available_space_px( + source_resolution_px: tuple[int, int], + available_size_px: tuple[int, int] + ) -> tuple[int, int]: + source_width_px: int = source_resolution_px[0] + source_height_px: int = source_resolution_px[1] + available_width_px: int = available_size_px[0] + available_height_px: int = available_size_px[1] + scale: float = min( + available_width_px / float(source_width_px), + available_height_px / float(source_height_px)) + return int(round(source_width_px * scale)), int(round(source_height_px * scale)) + + class StandardResolutions: + RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) + RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) + RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) + RES_1024X768: Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) + RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) + RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) + RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, 
y_px=1024) + RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) + + @staticmethod + def as_list(): + return [ + ImageUtils.StandardResolutions.RES_640X360, + ImageUtils.StandardResolutions.RES_640X480, + ImageUtils.StandardResolutions.RES_800X600, + ImageUtils.StandardResolutions.RES_1024X768, + ImageUtils.StandardResolutions.RES_1280X720, + ImageUtils.StandardResolutions.RES_1280X800, + ImageUtils.StandardResolutions.RES_1280X1024, + ImageUtils.StandardResolutions.RES_1920X1080] diff --git a/src/common/image_utils.py b/src/common/image_utils.py deleted file mode 100644 index 7cc4546..0000000 --- a/src/common/image_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -import cv2 -import numpy - - -class ImageUtils: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. - """ - - def __init__(self): - raise RuntimeError("ImageUtils is not meant to be instantiated.") - - @staticmethod - def black_image( - resolution_px: tuple[int, int], - ) -> numpy.ndarray: - return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) - - @staticmethod - def image_resize_to_fit( - opencv_image: numpy.ndarray, - available_size: tuple[int, int] # x, y - ) -> numpy.ndarray: - # note: opencv height represented by 1st dimension - source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) - image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( - source_resolution_px=source_resolution_px, - available_size_px=available_size) - return cv2.resize( - src=opencv_image, - dsize=(image_width_px, image_height_px)) - - @staticmethod - def scale_factor_for_available_space_px( - source_resolution_px: tuple[int, int], - available_size_px: tuple[int, int] - ) -> tuple[int, int]: - source_width_px: int = source_resolution_px[0] - source_height_px: int = source_resolution_px[1] - available_width_px: int = available_size_px[0] - 
available_height_px: int = available_size_px[1] - scale: float = min( - available_width_px / float(source_width_px), - available_height_px / float(source_height_px)) - return int(round(source_width_px * scale)), int(round(source_height_px * scale)) diff --git a/src/common/math.py b/src/common/math.py new file mode 100644 index 0000000..6b732a6 --- /dev/null +++ b/src/common/math.py @@ -0,0 +1,811 @@ +from .image_processing import Annotation +import cv2 +import math # Python's math module, not the one from this project! +import numpy +from pydantic import BaseModel, Field +from scipy.spatial.transform import Rotation +from typing import ClassVar, Final + + +_DEFAULT_EPSILON: Final[float] = 0.0001 + + +class IntrinsicParameters(BaseModel): + """ + Camera intrinsic parameters (focal length, optical center, distortion coefficients). + See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html + See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 + """ + + focal_length_x_px: float = Field() + focal_length_y_px: float = Field() + optical_center_x_px: float = Field() + optical_center_y_px: float = Field() + + radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV + + tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV + + def as_array(self) -> list[float]: + return_value: list[float] = [ + self.focal_length_x_px, + self.focal_length_y_px, + self.optical_center_x_px, + self.optical_center_y_px] + return_value += self.get_distortion_coefficients() + return return_value + + def get_matrix(self) -> list[list[float]]: + """calibration matrix expected by OpenCV in some operations""" + return \ + [[self.focal_length_x_px, 0.0, self.optical_center_x_px], + [0.0, self.focal_length_y_px, self.optical_center_y_px], + [0.0, 0.0, 1.0]] + + def get_distortion_coefficients(self) -> list[float]: + """ + Distortion coefficients in array format expected by OpenCV in some operations. 
+ See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d + calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with + """ + coefficients: list[float] = [ + self.radial_distortion_coefficients[0], + self.radial_distortion_coefficients[1], + self.tangential_distortion_coefficients[0], + self.tangential_distortion_coefficients[1]] + coefficients += self.radial_distortion_coefficients[2:] + return coefficients + + @staticmethod + def generate_zero_parameters( + resolution_x_px: int, + resolution_y_px: int, + fov_x_degrees: float = 45.0, + fov_y_degrees: float = 45.0 + ) -> "IntrinsicParameters": + optical_center_x_px: int = int(round(resolution_x_px/2.0)) + fov_x_radians: float = fov_x_degrees * math.pi / 180.0 + focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) + optical_center_y_px: int = int(round(resolution_y_px/2.0)) + fov_y_radians: float = fov_y_degrees * math.pi / 180.0 + focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) + return IntrinsicParameters( + focal_length_x_px=focal_length_x_px, + focal_length_y_px=focal_length_y_px, + optical_center_x_px=optical_center_x_px, + optical_center_y_px=optical_center_y_px, + radial_distortion_coefficients=[0.0, 0.0, 0.0], + tangential_distortion_coefficients=[0.0, 0.0]) + + +class IterativeClosestPointParameters(BaseModel): + # ICP will stop after this many iterations + termination_iteration_count: int = Field() + + # ICP will stop if distance *and* angle difference from one iteration to the next + # is smaller than these + termination_delta_translation: float = Field() + termination_delta_rotation_radians: float = Field() + + # ICP will stop if overall point-to-point distance (between source and target) + # mean *or* root-mean-square is less than specified + termination_mean_point_distance: float = Field() + termination_rms_point_distance: float = Field() # root-mean-square + + +class 
Landmark(BaseModel): + + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + + """ + A distinct point in 3D space. + Coordinates are in the unit of the user's choosing. + """ + feature_label: str = Field() + x: float = Field() + y: float = Field() + z: float = Field() + + def as_float_list(self) -> list[float]: + return [self.x, self.y, self.z] + + def base_feature_label(self) -> str: + """ + Part of the label before the RELATION_CHARACTER. + """ + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] + + +class Matrix4x4(BaseModel): + + @staticmethod + def _identity_values() -> list[float]: + return \ + [1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0] + values: list[float] = Field(default_factory=_identity_values) + + def as_numpy_array(self): + a = self.values + return numpy.asarray( + [[a[0], a[1], a[2], a[3]], + [a[4], a[5], a[6], a[7]], + [a[8], a[9], a[10], a[11]], + [a[12], a[13], a[14], a[15]]]) + + def __getitem__(self, idx: tuple[int, int]) -> float: + if isinstance(idx, tuple): + return self.values[(idx[0]*4) + idx[1]] + else: + raise ValueError("Unexpected index. Expected tuple index [row,col].") + + def __mul__(self, other) -> 'Matrix4x4': + if not isinstance(other, Matrix4x4): + raise ValueError + result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) + return Matrix4x4(values=list(result_numpy_array.flatten())) + + def get_rotation_as_quaternion(self, canonical: bool = False) -> list[float]: + # noinspection PyArgumentList + return Rotation.from_matrix(self.as_numpy_array()[0:3, 0:3]).as_quat(canonical=canonical).tolist() + + def get_translation(self) -> list[float]: + """ + Return a vector of [x,y,z] representing translation. 
+ """ + a = self.values + return [a[3], a[7], a[11]] + + def inverse(self) -> 'Matrix4x4': + inv_numpy_array = numpy.linalg.inv(self.as_numpy_array()) + return Matrix4x4.from_numpy_array(inv_numpy_array) + + # Note that these methods are not yet tested, and are disabled for now + # def set_rotation_from_quaternion(self, quaternion: list[float]) -> None: + # if len(quaternion) < 4: + # raise ValueError() + # # noinspection PyArgumentList + # rotation_matrix: numpy.ndarray = Rotation.from_quat(quaternion).as_matrix() + # self.values[0:3] = rotation_matrix[0, 0:3] + # self.values[4:7] = rotation_matrix[1, 0:3] + # self.values[8:11] = rotation_matrix[2, 0:3] + # def set_translation(self, translation: list[float]) -> None: + # if len(translation) < 3: + # raise ValueError() + # a = self.values + # a[3], a[7], a[11] = translation[0], translation[1], translation[2] + + @staticmethod + def from_raw_values( + v00, v01, v02, v03, + v10, v11, v12, v13, + v20, v21, v22, v23, + v30, v31, v32, v33 + ) -> 'Matrix4x4': + return Matrix4x4(values=[ + v00, v01, v02, v03, + v10, v11, v12, v13, + v20, v21, v22, v23, + v30, v31, v32, v33]) + + @staticmethod + def from_list( + value_list: list[float] + ) -> 'Matrix4x4': + if len(value_list) != 16: + raise ValueError(f"Expected a list of 16 float. Got {str(value_list)}.") + return Matrix4x4(values=list(value_list)) + + @staticmethod + def from_numpy_array( + value_array: numpy.ndarray + ) -> 'Matrix4x4': + if len(value_array) != 4: + raise ValueError(f"Expected input array to have 4 rows. Got {len(value_array)}.") + for i in range(0, len(value_array)): + if len(value_array[i]) != 4: + raise ValueError(f"Expected input row {i} to have 4 col. 
Got {len(value_array[i])}.") + return Matrix4x4(values=list(value_array.flatten())) + + +class Pose(BaseModel): + target_id: str = Field() + object_to_reference_matrix: Matrix4x4 = Field() + solver_timestamp_utc_iso8601: str = Field() + + +class Ray: + source_point: list[float] + direction: list[float] + + def __init__( + self, + source_point: list[float], + direction: list[float], + epsilon: float = _DEFAULT_EPSILON + ): + direction_norm = numpy.linalg.norm(direction) + if direction_norm < epsilon: + raise ValueError("Direction cannot be zero.") + self.source_point = source_point + self.direction = direction + + +class FeatureRay(BaseModel, Ray): + """Same as Ray, but with an added feature_label str""" + source_point: list[float] + direction: list[float] + feature_label: str + + +class Target(BaseModel): + """ + A trackable object. + """ + label: str + landmarks: list[Landmark] + + def get_landmark_point( + self, + feature_label: str + ) -> list[float]: + for landmark in self.landmarks: + if landmark.feature_label == feature_label: + return landmark.as_float_list() + raise ValueError + + +class MathUtils: + """ + static class for reused math-related functions. + """ + + def __init__(self): + raise RuntimeError("This class is not meant to be initialized.") + + @staticmethod + def average_quaternion( + quaternions: list[list[float]] + ) -> list[float]: + """ + Solution based on this link: https://stackoverflow.com/a/27410865 + based on Markley et al. "Averaging quaternions." Journal of Guidance, Control, and Dynamics 30.4 (2007): 1193-1197. 
+ """ + quaternion_matrix = numpy.array(quaternions, dtype="float32").transpose() # quaternions into columns + quaternion_matrix /= len(quaternions) + eigenvalues, eigenvectors = numpy.linalg.eig(numpy.matmul(quaternion_matrix, quaternion_matrix.transpose())) + maximum_eigenvalue_index = numpy.argmax(eigenvalues) + quaternion = eigenvectors[:, maximum_eigenvalue_index] + if quaternion[3] < 0: + quaternion *= -1 + return quaternion.tolist() + + @staticmethod + def average_vector( + translations: list[list[float]] + ) -> list[float]: + """ + This is a very simple function for averaging translations + when it is not desired to use numpy (for whatever reason) + """ + sum_translations: list[float] = [0.0, 0.0, 0.0] + for translation in translations: + for i in range(0, 3): + sum_translations[i] += translation[i] + translation_count = len(translations) + return [ + sum_translations[0] / translation_count, + sum_translations[1] / translation_count, + sum_translations[2] / translation_count] + + class RayIntersection2Output: + parallel: bool # special case, mark it as such + closest_point_1: numpy.ndarray + closest_point_2: numpy.ndarray + + def __init__( + self, + parallel: bool, + closest_point_1: numpy.ndarray, + closest_point_2: numpy.ndarray + ): + self.parallel = parallel + self.closest_point_1 = closest_point_1 + self.closest_point_2 = closest_point_2 + + def centroid(self) -> numpy.ndarray: + return (self.closest_point_1 + self.closest_point_2) / 2 + + def distance(self) -> float: + return float(numpy.linalg.norm(self.closest_point_2 - self.closest_point_1)) + + @staticmethod + def closest_intersection_between_two_lines( + ray_1: Ray, + ray_2: Ray, + epsilon: float = _DEFAULT_EPSILON + ) -> RayIntersection2Output: # Returns data on intersection + ray_1_direction_normalized = numpy.asarray(ray_1.direction) / numpy.linalg.norm(ray_1.direction) + ray_2_direction_normalized = numpy.asarray(ray_2.direction) / numpy.linalg.norm(ray_2.direction) + + # ray 3 will be 
perpendicular to both rays 1 and 2, + # and will intersect with both rays at the nearest point(s) + + ray_3_direction = numpy.cross(ray_2_direction_normalized, ray_1_direction_normalized) + ray_3_direction_norm = numpy.linalg.norm(ray_3_direction) + if ray_3_direction_norm < epsilon: + return MathUtils.RayIntersection2Output( + parallel=True, + closest_point_1=numpy.asarray(ray_1.source_point), + closest_point_2=numpy.asarray(ray_2.source_point)) + + # system of equations Ax = b + b = numpy.subtract(ray_2.source_point, ray_1.source_point) + a = numpy.asarray( + [ray_1_direction_normalized, -ray_2_direction_normalized, ray_3_direction], dtype="float32").transpose() + x = numpy.linalg.solve(a, b) + + param_ray_1 = float(x[0]) + intersection_point_1 = ray_1.source_point + param_ray_1 * ray_1_direction_normalized + + param_ray_2 = float(x[1]) + intersection_point_2 = ray_2.source_point + param_ray_2 * ray_2_direction_normalized + + return MathUtils.RayIntersection2Output( + parallel=False, + closest_point_1=intersection_point_1, + closest_point_2=intersection_point_2) + + class RayIntersectionNOutput: + centroids: numpy.ndarray + + # How many rays were used. + # Note that centroids might not use all possible intersections (e.g. 
parallel rays, exceeded maximum distance) + ray_count: int + + def __init__( + self, + centroids: numpy.ndarray, + ray_count: int + ): + self.centroids = centroids + self.ray_count = ray_count + + def centroid(self) -> numpy.ndarray: + sum_centroids = numpy.asarray([0, 0, 0], dtype="float32") + for centroid in self.centroids: + sum_centroids += centroid + return sum_centroids / self.centroids.shape[0] + + def intersection_count(self) -> int: + return int((self.ray_count * (self.ray_count - 1)) / 2) + + @staticmethod + def closest_intersection_between_n_lines( + rays: list[Ray], + maximum_distance: float + ) -> RayIntersectionNOutput: + ray_count = len(rays) + intersections: list[MathUtils.RayIntersection2Output] = list() + for ray_1_index in range(0, ray_count): + for ray_2_index in range(ray_1_index + 1, ray_count): + intersections.append(MathUtils.closest_intersection_between_two_lines( + ray_1=rays[ray_1_index], + ray_2=rays[ray_2_index])) + centroids: list[numpy.ndarray] = list() + for intersection in intersections: + if intersection.parallel: + continue + if intersection.distance() > maximum_distance: + continue + centroids.append(intersection.centroid()) + return MathUtils.RayIntersectionNOutput( + centroids=numpy.asarray(centroids, dtype="float32"), + ray_count=ray_count) + + @staticmethod + def closest_point_on_ray( + ray_source: list[float], + ray_direction: list[float], + query_point: list[float], + forward_only: bool + ): + """ + Find the closest point on a ray in 3D. + """ + # Let ray_point be the closest point between query_point and the ray. + # (ray_point - query_point) will be perpendicular to ray_direction. + # Let ray_distance be the distance along the ray where the closest point is. 
+ # So we have two equations: + # (1) (ray_point - query_point) * ray_direction = 0 + # (2) ray_point = ray_source + ray_distance * ray_direction + # If we substitute eq (2) into (1) and solve for ray_distance, we get: + ray_distance: float = ( + (query_point[0] * ray_direction[0] + query_point[1] * ray_direction[1] + query_point[2] * ray_direction[ + 2] + - ray_source[0] * ray_direction[0] - ray_source[1] * ray_direction[1] - ray_source[2] * ray_direction[ + 2]) + / + ((ray_direction[0] ** 2) + (ray_direction[1] ** 2) + (ray_direction[2] ** 2))) + + if ray_distance < 0 and forward_only: + return ray_source # point is behind the source, so the closest point is just the source + + ray_point = [0.0] * 3 # temporary values + for i in range(0, 3): + ray_point[i] = ray_source[i] + ray_distance * ray_direction[i] + return ray_point + + @staticmethod + def convert_detector_points_to_vectors( + points: list[list[float]], # [point_index][x/y] + detector_intrinsics: IntrinsicParameters, + detector_to_reference_matrix: Matrix4x4 + ) -> list[list[float]]: + """ + Given a detector's matrix transform and its intrinsic properties, + convert pixel coordinates to ray directions (with origin at the detector). 
+ """ + distorted_points: numpy.ndarray = numpy.asarray(points) + distorted_points = numpy.reshape( + distorted_points, + newshape=(1, len(points), 2)) + undistorted_points: numpy.ndarray = cv2.undistortPoints( + src=distorted_points, + cameraMatrix=numpy.asarray(detector_intrinsics.get_matrix()), + distCoeffs=numpy.asarray(detector_intrinsics.get_distortion_coefficients())) + rays: list[list[float]] = list() + origin_point_detector = [0, 0, 0, 1] # origin + detector_to_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array() + for undistorted_point in undistorted_points: + target_point_image = [undistorted_point[0, 0], undistorted_point[0, 1], 1, 1] # reverse perspective + target_point_detector = MathUtils.image_to_opengl_vector(target_point_image) + ray_direction_detector = numpy.subtract(target_point_detector, origin_point_detector) + ray_direction_detector = ray_direction_detector / numpy.linalg.norm(ray_direction_detector) # normalize + ray_direction_reference = numpy.matmul(detector_to_reference, ray_direction_detector) + rays.append(list(ray_direction_reference[0:3])) + return rays + + @staticmethod + def convex_quadrilateral_area( + points: list[list[float]], # 2D points in clockwise order + epsilon: float = _DEFAULT_EPSILON + ) -> float: + """ + Compute the area of a quadrilateral, given 2D points in clockwise order. + """ + + # General approach: + # Given points a, b, c, and d shown below, + # and calculating points e and f shown below, + # add areas defined by right triangles bea, ceb, dfc, and afd + # b..................c + # . .. ... + # . ... ... . + # . .. .f. . + # . .e. ... . + # . ... .. . + # ... ... . 
@staticmethod
def convex_quadrilateral_area(
    points: list[list[float]],  # 2D points in clockwise order
    epsilon: float = _DEFAULT_EPSILON
) -> float:
    """
    Compute the area of a quadrilateral, given 2D points in clockwise order.
    :param points: four [x, y] points in clockwise order
    :param epsilon: degenerate-diagonal threshold; shorter diagonals yield area 0.0
    """
    # General approach: with points a, b, c, d, build axes aligned with
    # diagonal ac ("width") and its 90-degree rotation ("height"); project each
    # of the four edges onto those axes and sum the right-triangle areas
    # |width * height| / 2.
    point_a = numpy.array(points[0], dtype="float32")
    point_b = numpy.array(points[1], dtype="float32")
    point_c = numpy.array(points[2], dtype="float32")
    point_d = numpy.array(points[3], dtype="float32")

    vector_ac = point_c - point_a
    vector_ac_norm = numpy.linalg.norm(vector_ac)
    vector_bd = point_d - point_b
    vector_bd_norm = numpy.linalg.norm(vector_bd)
    if vector_ac_norm <= epsilon or vector_bd_norm <= epsilon:
        return 0.0  # degenerate quadrilateral
    width_vector = vector_ac / vector_ac_norm  # reuse the norm computed above
    height_vector = numpy.array([width_vector[1], -width_vector[0]], dtype="float32")  # rotated 90 degrees

    sum_of_areas: float = 0.0
    point_pairs: list[tuple[numpy.ndarray, numpy.ndarray]] = [
        (point_a, point_b),
        (point_b, point_c),
        (point_c, point_d),
        (point_d, point_a)]
    for point_pair in point_pairs:
        line_vector = point_pair[1] - point_pair[0]
        width = numpy.dot(line_vector, width_vector)
        height = numpy.dot(line_vector, height_vector)
        sum_of_areas += numpy.abs(width * height / 2.0)

    return sum_of_areas

@staticmethod
def estimate_matrix_transform_to_detector(
    annotations: list[Annotation],
    landmarks: list[Landmark],
    detector_intrinsics: IntrinsicParameters
) -> tuple[bool, Matrix4x4 | None]:
    """
    Estimate the object-to-detector transform from 2D annotations matched
    (by feature label) to 3D landmarks, via cv2.solvePnP.
    returns: Tuple containing:
        0: 'estimated' bool indicating whether it was possible to make an estimate.
           Could be false if e.g. no annotations correspond to the provided landmarks
        1: If 'estimated' is True, the matrix transform that was calculated. Otherwise None.
    """
    target_points: list[list[float]] = list()  # ordered points [point_index][x/y/z]
    detector_points: list[list[float]] = list()  # ordered points [point_index][x/y]
    annotations_dict: dict[str, Annotation] = {annotation.feature_label: annotation for annotation in annotations}
    for landmark in landmarks:
        if landmark.feature_label in annotations_dict.keys():
            annotation = annotations_dict[landmark.feature_label]
            target_points.append([landmark.x, landmark.y, landmark.z])
            detector_points.append([annotation.x_px, annotation.y_px])
    if len(detector_points) <= 4:
        # NOTE(review): this requires at least 5 correspondences; if exactly 4
        # (the solvePnP minimum for planar targets) should be acceptable,
        # this comparison ought to be `< 4` — confirm intent.
        return False, None
    rotation_vector: numpy.ndarray
    translation_vector: numpy.ndarray
    _, rotation_vector, translation_vector = cv2.solvePnP(
        objectPoints=numpy.asarray(target_points),
        imagePoints=numpy.asarray(detector_points),
        cameraMatrix=numpy.asarray(detector_intrinsics.get_matrix()),
        distCoeffs=numpy.asarray(detector_intrinsics.get_distortion_coefficients()))
    rotation_vector = rotation_vector.flatten()
    translation_vector = translation_vector.flatten()
    object_to_camera_matrix = numpy.identity(4, dtype="float32")
    object_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix()
    object_to_camera_matrix[0:3, 3] = translation_vector[0:3]
    object_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(object_to_camera_matrix)
    # Fix: return the declared (bool, matrix) tuple; previously this returned
    # the bare matrix, contradicting the `tuple[bool, Matrix4x4 | None]`
    # signature and the `return False, None` failure path above.
    return True, Matrix4x4.from_numpy_array(object_to_detector_matrix)

class IterativeClosestPointOutput:
    """Summary of one ICP run: the resulting transform plus convergence statistics."""

    source_to_target_matrix: Matrix4x4  # transform mapping source points into the target frame
    iteration_count: int  # number of iterations actually performed
    mean_point_distance: float  # mean residual distance at termination
    rms_point_distance: float  # root-mean-square residual distance at termination

    def __init__(
        self,
        source_to_target_matrix: Matrix4x4,
        iteration_count: int,
        mean_point_distance: float,
        rms_point_distance: float
    ):
        self.source_to_target_matrix = source_to_target_matrix
        self.iteration_count = iteration_count
        self.mean_point_distance = mean_point_distance
        self.rms_point_distance = rms_point_distance
@staticmethod
def iterative_closest_point_for_points_and_rays(
    source_known_points: list[list[float]],
    target_known_points: list[list[float]],
    source_ray_points: list[list[float]],
    target_rays: list[Ray],
    parameters: IterativeClosestPointParameters = None,
    initial_transformation_matrix: numpy.ndarray = None
) -> IterativeClosestPointOutput:
    """
    Algorithm is based on ICP: Besl and McKay. Method for registration of 3-D shapes. 1992.
    This is customized, adapted to the problem of registering a set of points to
    a set of points and rays where the correspondence is known.
    :param source_known_points: points with known corresponding positions in both source and target coordinate frames
    :param target_known_points: points with known corresponding positions in both source and target coordinate frames
    :param source_ray_points: points with known position in the source coordinate frame, but NOT in target
    :param target_rays: rays along which the remaining target points lie (1:1 correspondence with source_ray_points)
    :param parameters: termination criteria; defaults are used when None
    :param initial_transformation_matrix: optional 4x4 starting transform (identity when None)
    """

    def _transform_points(
        original_points: list[list[float]],
        transformation: numpy.ndarray
    ):
        # Apply a 4x4 homogeneous transform to each 3D point.
        transformed_points: list[list[float]] = list()
        for point in original_points:
            transformed_point = list(numpy.matmul(
                transformation,
                numpy.array([point[0], point[1], point[2], 1])))
            transformed_points.append([transformed_point[0], transformed_point[1], transformed_point[2]])
        return transformed_points

    if len(source_known_points) != len(target_known_points):
        raise ValueError(
            "source_known_points and target_known_points must be of equal length (1:1 correspondence).")

    # Fix: this guard previously repeated the known-points comparison above
    # (copy-paste), leaving the ray inputs entirely unvalidated.
    if len(source_ray_points) != len(target_rays):
        raise ValueError("source_ray_points and target_rays must be of equal length (1:1 correspondence).")

    # Initial transformation
    source_to_transformed_matrix: numpy.ndarray
    if initial_transformation_matrix is not None:
        source_to_transformed_matrix = numpy.array(initial_transformation_matrix, dtype="float32")
    else:
        source_to_transformed_matrix = numpy.identity(4, dtype="float32")

    if parameters is None:
        parameters = IterativeClosestPointParameters(
            termination_iteration_count=50,
            termination_delta_translation=0.1,
            termination_delta_rotation_radians=0.001,
            termination_mean_point_distance=0.1,
            termination_rms_point_distance=0.1)

    transformed_known_points: list[list[float]] = _transform_points(
        original_points=source_known_points,
        transformation=source_to_transformed_matrix)
    transformed_ray_points: list[list[float]] = _transform_points(
        original_points=source_ray_points,
        transformation=source_to_transformed_matrix)

    iteration_count: int = 0
    mean_point_distance: float
    rms_point_distance: float
    while True:
        # Project each transformed ray point onto its corresponding target ray;
        # these projections serve as that point's target for this iteration.
        target_ray_points: list[list[float]] = list()
        for i, transformed_ray_point in enumerate(transformed_ray_points):
            target_ray_points.append(MathUtils.closest_point_on_ray(
                ray_source=target_rays[i].source_point,
                ray_direction=target_rays[i].direction,
                query_point=transformed_ray_point,
                forward_only=True))

        transformed_all_points = transformed_known_points + transformed_ray_points
        target_points = target_known_points + target_ray_points
        transformed_to_target_matrix = MathUtils.register_corresponding_points(
            point_set_from=transformed_all_points,
            point_set_to=target_points,
            collinearity_do_check=False)

        # update transformation & transformed points
        source_to_transformed_matrix = numpy.matmul(transformed_to_target_matrix, source_to_transformed_matrix)
        transformed_known_points = _transform_points(
            original_points=source_known_points,
            transformation=source_to_transformed_matrix)
        transformed_ray_points = _transform_points(
            original_points=source_ray_points,
            transformation=source_to_transformed_matrix)

        iteration_count += 1

        # Residual statistics for this iteration.
        transformed_all_points = transformed_known_points + transformed_ray_points
        point_offsets = numpy.subtract(target_points, transformed_all_points).tolist()
        sum_point_distances = 0.0
        sum_square_point_distances = 0.0
        for delta_point_offset in point_offsets:
            delta_point_distance: float = numpy.linalg.norm(delta_point_offset)
            sum_point_distances += delta_point_distance
            sum_square_point_distances += numpy.square(delta_point_distance)
        mean_point_distance = sum_point_distances / len(point_offsets)
        rms_point_distance = numpy.sqrt(sum_square_point_distances / len(point_offsets))

        # Check if termination criteria are met.
        # Note that transformed_to_target_matrix describes the change since last iteration,
        # so we often operate on it.
        delta_translation = numpy.linalg.norm(transformed_to_target_matrix[0:3, 3])
        delta_rotation_radians = \
            numpy.linalg.norm(Rotation.from_matrix(transformed_to_target_matrix[0:3, 0:3]).as_rotvec())
        if delta_translation < parameters.termination_delta_translation and \
                delta_rotation_radians < parameters.termination_delta_rotation_radians:
            break
        if mean_point_distance < parameters.termination_mean_point_distance:
            break
        if rms_point_distance < parameters.termination_rms_point_distance:
            break
        if iteration_count >= parameters.termination_iteration_count:
            break

    return MathUtils.IterativeClosestPointOutput(
        source_to_target_matrix=Matrix4x4.from_numpy_array(source_to_transformed_matrix),
        iteration_count=iteration_count,
        mean_point_distance=mean_point_distance,
        rms_point_distance=rms_point_distance)
transformation_matrix_180[1, 1] *= -1 + # transformation_matrix_180[2, 2] *= -1 + # return numpy.matmul(transformation_matrix_180, transformation_matrix_image) + + @staticmethod + def image_to_opengl_vector( + vector_image: numpy.ndarray | list[float] + ) -> numpy.ndarray: + vector_detector = numpy.array(vector_image) + vector_detector[1:3] = -vector_detector[1:3] + return vector_detector + # Equivalent to: + # transformation_matrix_180 = numpy.identity(4, dtype="float") + # transformation_matrix_180[1, 1] *= -1 + # transformation_matrix_180[2, 2] *= -1 + # return numpy.matmul(transformation_matrix_180, vector_image) + + @staticmethod + def register_corresponding_points( + point_set_from: list[list[float]], + point_set_to: list[list[float]], + collinearity_do_check: bool = True, + collinearity_zero_threshold: float = 0.0001, + use_oomori_mirror_fix: bool = True + ) -> numpy.ndarray: # 4x4 transformation matrix, indexed by [row,col] + """ + Solution based on: Arun et al. Least square fitting of two 3D point sets (1987) + https://stackoverflow.com/questions/66923224/rigid-registration-of-two-point-clouds-with-known-correspondence + Use mirroring solution proposed by Oomori et al. + Oomori et al. Point cloud matching using singular value decomposition. (2016) + :param point_set_from: + :param point_set_to: + :param collinearity_do_check: Do a (naive) collinearity check. May be computationally expensive. + :param collinearity_zero_threshold: Threshold considered zero for cross product and norm comparisons + :param use_oomori_mirror_fix: Use the mirroring solution proposed in Oomori et al 2016. 
+ """ + if len(point_set_from) != len(point_set_to): + raise ValueError("Input point sets must be of identical length.") + if len(point_set_from) < 3: + raise ValueError("Input point sets must be of length 3 or higher.") + if collinearity_do_check: + for point_set in (point_set_from, point_set_to): + collinear = True # assume true until shown otherwise + p1: numpy.ndarray = numpy.asarray(point_set[0]) + vec1: numpy.ndarray + i: int = 1 + for i in range(i, len(point_set)): + p2: numpy.ndarray = numpy.asarray(point_set[1]) + vec1 = p2 - p1 + vec1_length: float = float(numpy.linalg.norm(vec1)) + if vec1_length > collinearity_zero_threshold: + break # points are distinct, move to next phase + for i in range(i, len(point_set)): + p3: numpy.ndarray = numpy.asarray(point_set[2]) + vec2: numpy.ndarray = p3 - p1 + # noinspection PyUnboundLocalVariable + cross_product_norm: float = float(numpy.linalg.norm(numpy.cross(vec1, vec2))) + if cross_product_norm > collinearity_zero_threshold: + collinear = False + break + if collinear: + raise ValueError("Input points appear to be collinear - please check the input.") + + # for consistency, points are in rows + point_count = len(point_set_from) + sums_from = numpy.array([0, 0, 0], dtype="float32") + sums_to = numpy.array([0, 0, 0], dtype="float32") + for point_index in range(0, point_count): + sums_from += numpy.array(point_set_from[point_index]) + sums_to += numpy.array(point_set_to[point_index]) + centroid_from = (sums_from / point_count) + centroid_to = (sums_to / point_count) + points_from = numpy.array(point_set_from) + points_to = numpy.array(point_set_to) + centered_points_from = points_from - numpy.hstack(centroid_from) + centered_points_to = points_to - numpy.hstack(centroid_to) + covariance = numpy.matmul(centered_points_from.T, centered_points_to) + u, _, vh = numpy.linalg.svd(covariance) + s = numpy.identity(3, dtype="float32") # s will be the Oomori mirror fix + if use_oomori_mirror_fix: + s[2, 2] = 
numpy.linalg.det(numpy.matmul(u, vh)) + rotation = numpy.matmul(u, numpy.matmul(s, vh)).transpose() + translation = centroid_to - numpy.matmul(rotation, centroid_from) + matrix = numpy.identity(4, dtype="float32") + matrix[0:3, 0:3] = rotation + matrix[0:3, 3] = translation[0:3].reshape(3) + return matrix + + @staticmethod + def square_marker_corner_points( + marker_size: float + ) -> list[list[float]]: #[corner_index][dimension_index], 3D + half_width = marker_size / 2.0 + corner_points = [ + [-half_width, half_width, 0., 1.], # Top-left + [half_width, half_width, 0., 1.], # Top-right + [half_width, -half_width, 0., 1.], # Bottom-right + [-half_width, -half_width, 0., 1.]] # Bottom-left + return corner_points diff --git a/src/common/mct_component.py b/src/common/mct_component.py index bc512b7..5e804ed 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -1,6 +1,4 @@ -from .get_kwarg import get_kwarg -from .status_message_source import StatusMessageSource -from src.common.api import \ +from .api import \ DequeueStatusMessagesRequest, \ DequeueStatusMessagesResponse, \ EmptyResponse, \ @@ -13,21 +11,64 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.common.exceptions import MCTParsingError -from src.common.structures import \ - MCTParsable, \ +from .image_processing import \ + Annotation, \ + ImageResolution +from .math import \ + Pose +from .serialization import \ + MCTDeserializable, \ + MCTSerializationError +from .status import \ SeverityLabel, \ - StatusMessage + StatusMessage, \ + StatusMessageSource import abc import datetime from fastapi import WebSocket, WebSocketDisconnect import logging +from pydantic import BaseModel, Field from typing import Callable, Optional, TypeVar + logger = logging.getLogger(__name__) -ParsableDynamicSingle = TypeVar('ParsableDynamicSingle', bound=MCTParsable) +SerializableSingle = TypeVar('SerializableSingle', bound=MCTDeserializable) +T = TypeVar("T") + + +class 
DetectorFrame(BaseModel): + annotations: list[Annotation] = Field(default_factory=list) + timestamp_utc_iso8601: str = Field() + image_resolution: ImageResolution = Field() + + @property + def annotations_identified(self): + return [ + annotation + for annotation in self.annotations + if annotation.feature_label != Annotation.UNIDENTIFIED_LABEL] + + @property + def annotations_unidentified(self): + return [ + annotation + for annotation in self.annotations + if annotation.feature_label == Annotation.UNIDENTIFIED_LABEL] + + @property + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class MixerFrame(BaseModel): + detector_poses: list[Pose] | None = Field() + target_poses: list[Pose] | None = Field() + timestamp_utc_iso8601: str = Field() + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) class MCTComponent(abc.ABC): @@ -67,15 +108,15 @@ def add_status_subscriber( def parse_dynamic_series_list( self, parsable_series_dict: dict, - supported_types: list[type[ParsableDynamicSingle]] - ) -> list[ParsableDynamicSingle]: + supported_types: dict[str, type[SerializableSingle]] + ) -> list[SerializableSingle]: try: - return MCTParsable.parse_dynamic_series_list( - parsable_series_dict=parsable_series_dict, + return MCTDeserializable.deserialize_series_list( + series_dict=parsable_series_dict, supported_types=supported_types) - except MCTParsingError as e: + except MCTSerializationError as e: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=e.message) raise e @@ -83,7 +124,7 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: """ :key client_identifier: str """ - client_identifier: str = get_kwarg( + client_identifier: str = self.get_kwarg( kwargs=kwargs, key="client_identifier", arg_type=str) @@ -92,11 +133,48 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: return 
DequeueStatusMessagesResponse( status_messages=status_messages) + @staticmethod + def get_kwarg( + kwargs: dict, + key: str, + arg_type: type[T], + required: bool = True + ) -> T | None: + """ + :param kwargs: kwargs as a dict (without the "**") + :param key: key to search for + :param arg_type: expected type + :param required: If the keyword does not exist, then: + required == True -> Raise ValueError + required == False -> Return None + """ + + if key not in kwargs: + if required: + raise ValueError(f"Missing required key {key} in keyword arguments.") + return None + value: T = kwargs[key] + if not isinstance(value, arg_type): + raise ValueError( + f"Expected keyword argument {key} to be of type {arg_type.__name__}, " + f"but got {type(value).__name__}.") + return value + + @staticmethod + @abc.abstractmethod + def get_role_label(): + pass + def get_status_message_source(self): return self._status_message_source + def supported_request_types(self) -> dict[str, list[MCTRequest]]: + return { + request_type.type_identifier(): request_type + for request_type in self.supported_request_methods().keys()} + @abc.abstractmethod - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: """ All subclasses are expected to implement this method, even if it is simply a call to super(). 
:return: @@ -111,11 +189,11 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT TimeSyncStopRequest: self.time_sync_stop} def timestamp_get(self, **kwargs) -> TimestampGetResponse: - request: TimestampGetRequest = get_kwarg( + request: TimestampGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=TimestampGetRequest) - timestamp_utc_iso8601 : str = datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601 : str = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() return TimestampGetResponse( requester_timestamp_utc_iso8601=request.requester_timestamp_utc_iso8601, responder_timestamp_utc_iso8601=timestamp_utc_iso8601) @@ -140,15 +218,15 @@ async def websocket_handler(self, websocket: WebSocket) -> None: try: request_series_list: list[MCTRequest] = self.parse_dynamic_series_list( parsable_series_dict=request_series_dict, - supported_types=list(self.supported_request_types().keys())) - except MCTParsingError as e: + supported_types=self.supported_request_types()) + except MCTSerializationError as e: logger.exception(str(e)) - await websocket.send_json(MCTResponseSeries(requests_parsed=False).dict()) + await websocket.send_json(MCTResponseSeries().model_dump()) continue response_series: MCTResponseSeries = self.websocket_handle_requests( client_identifier=client_identifier, request_series=MCTRequestSeries(series=request_series_list)) - await websocket.send_json(response_series.dict()) + await websocket.send_json(response_series.model_dump()) except WebSocketDisconnect as e: print(f"DISCONNECTED: {str(e)}") logger.info(str(e)) @@ -158,7 +236,7 @@ def websocket_handle_requests( client_identifier: str, request_series: MCTRequestSeries ) -> MCTResponseSeries: - request_map: dict[type[MCTRequest], Callable] = self.supported_request_types() + request_map: dict[type[MCTRequest], Callable] = self.supported_request_methods() response_series: list[MCTResponse] = list() for request in request_series.series: # 
noinspection PyBroadException @@ -172,13 +250,16 @@ def websocket_handle_requests( else: message: str = f"Received unimplemented parsable_type: {request.parsable_type}." logger.error(message) - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) response_series.append(ErrorResponse(message=message)) except Exception as e: message: str = f"Internal error. Failed to process request series." logger.error(message + " " + str(e)) - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) response_series.append(ErrorResponse(message=message)) return MCTResponseSeries( - requests_parsed=True, series=response_series) diff --git a/src/common/pose_solver.py b/src/common/pose_solver.py new file mode 100644 index 0000000..dfdbf6f --- /dev/null +++ b/src/common/pose_solver.py @@ -0,0 +1,574 @@ +from .image_processing import Annotation +from .math import \ + IntrinsicParameters, \ + IterativeClosestPointParameters, \ + MathUtils, \ + Matrix4x4, \ + Pose, \ + Ray, \ + Target +from .status import MCTError +import cv2 +import cv2.aruco +import datetime +import itertools +import numpy +from pydantic import BaseModel, Field +from scipy.spatial.transform import Rotation +from typing import Final, TypeVar + + +EPSILON: Final[float] = 0.0001 +_CORNER_COUNT: Final[int] = 4 + +KeyType = TypeVar("KeyType") +ValueType = TypeVar("ValueType") + + +class _DetectorRecord: + """ + Class whose purpose is to keep track of the latest position of each landmark (in annotation form) + for a single detector. 
class _DetectorRecord:
    """
    Class whose purpose is to keep track of the latest position of each landmark (in annotation form)
    for a single detector.
    """

    class TimestampedAnnotation:
        # An annotation paired with the UTC timestamp of the frame it came from.
        annotation: Annotation
        timestamp_utc: datetime.datetime

        def __init__(
            self,
            annotation: Annotation,
            timestamp_utc: datetime.datetime
        ):
            self.annotation = annotation
            self.timestamp_utc = timestamp_utc

    # Latest timestamped annotation per feature label.
    _timestamped_annotations: dict[str, TimestampedAnnotation]

    def __init__(self):
        self._timestamped_annotations = dict()

    def add_frame_record(
        self,
        frame_annotations: list[Annotation],
        frame_timestamp_utc: datetime.datetime
    ) -> None:
        """
        Merge a frame's annotations into the record, keeping only the newest
        annotation per feature label (ties keep the existing entry).
        """
        for annotation in frame_annotations:
            if annotation.feature_label not in self._timestamped_annotations:
                self._timestamped_annotations[annotation.feature_label] = _DetectorRecord.TimestampedAnnotation(
                    annotation=annotation,
                    timestamp_utc=frame_timestamp_utc)
                continue
            timestamped_annotation: _DetectorRecord.TimestampedAnnotation = \
                self._timestamped_annotations[annotation.feature_label]
            if frame_timestamp_utc > timestamped_annotation.timestamp_utc:
                self._timestamped_annotations[annotation.feature_label] = _DetectorRecord.TimestampedAnnotation(
                    annotation=annotation,
                    timestamp_utc=frame_timestamp_utc)

    def clear_frame_records(self):
        """Discard all stored annotations."""
        self._timestamped_annotations.clear()

    def clear_frame_records_older_than(
        self,
        timestamp_utc: datetime.datetime
    ) -> bool:
        """
        Remove entries whose timestamp is strictly older than timestamp_utc.
        returns True if any changes were made
        """
        feature_labels_to_remove: list[str] = list()
        entry: _DetectorRecord.TimestampedAnnotation
        for entry in self._timestamped_annotations.values():
            if entry.timestamp_utc < timestamp_utc:
                feature_labels_to_remove.append(entry.annotation.feature_label)
        if len(feature_labels_to_remove) <= 0:
            return False
        for feature_label in feature_labels_to_remove:
            del self._timestamped_annotations[feature_label]
        return True

    def get_annotations(
        self,
        deep_copy: bool = True
    ) -> list[Annotation]:
        """
        Return the stored annotations; by default each is deep-copied so callers
        cannot mutate the record's internal state.
        """
        if deep_copy:
            return [entry.annotation.model_copy() for entry in self._timestamped_annotations.values()]
        return [entry.annotation for entry in self._timestamped_annotations.values()]


class PoseSolverException(MCTError):
    """Raised for pose-solver-specific configuration or usage errors."""

    message: str

    def __init__(self, message: str, *args, **kwargs):
        # Fix: forward args/kwargs unpacked to the base class; previously the
        # tuple and dict objects themselves were passed as two positional
        # arguments (`super().__init__(args, kwargs)`).
        super().__init__(*args, **kwargs)
        self.message = message
use this to make sure each marker is associated uniquely to a single target + _landmark_target_map: dict[str, Target] # Each marker shall be used at most once by a single target + + # outputs + _poses_by_target_label: dict[str, Matrix4x4] + _poses_by_detector_label: dict[str, Matrix4x4] + + def __init__( + self + ): + self._last_change_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + self._last_updated_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + + self._configuration = PoseSolver.Configuration() + self._intrinsics_by_detector_label = dict() + self._extrinsics_by_detector_label = dict() + self._targets = list() + self._detector_records_by_detector_label = dict() + + self._landmark_target_map = dict() + + self._poses_by_target_label = dict() + self._poses_by_detector_label = dict() + + def add_detector_frame( + self, + detector_label: str, + frame_annotations: list[Annotation], + frame_timestamp_utc: datetime.datetime + ) -> None: + if detector_label not in self._detector_records_by_detector_label: + self._detector_records_by_detector_label[detector_label] = _DetectorRecord() + self._detector_records_by_detector_label[detector_label].clear_frame_records() + self._detector_records_by_detector_label[detector_label].add_frame_record( + frame_annotations=frame_annotations, + frame_timestamp_utc=frame_timestamp_utc) + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def add_target( + self, + target: Target + ) -> None: + for existing_target in self._targets: + if target.label == existing_target.label: + raise PoseSolverException( + f"Target with name {target.label} is already registered. 
" + f"Please use a different name, and also make sure you are not adding the same target twice.") + landmark_labels: list[str] = [landmark.feature_label for landmark in target.landmarks] + for landmark_label in landmark_labels: + if landmark_label in self._landmark_target_map: + target_id: str = self._landmark_target_map[landmark_label].label + raise PoseSolverException( + f"Landmark {landmark_label} is already used with target {target_id} and it cannot be reused.") + target_index = len(self._targets) + self._targets.append(target) + for landmark_label in landmark_labels: + self._landmark_target_map[landmark_label] = self._targets[target_index] + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def clear_extrinsic_matrices(self): + self._extrinsics_by_detector_label.clear() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def clear_intrinsic_parameters(self): + self._intrinsics_by_detector_label.clear() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def clear_targets(self): + self._targets.clear() + self._landmark_target_map.clear() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def get_poses( + self + ) -> tuple[list[Pose], list[Pose]]: + """ + Returns detector_poses, target_poses + """ + detector_poses: list[Pose] = [ + Pose( + target_id=detector_label, + object_to_reference_matrix=pose, + solver_timestamp_utc_iso8601=self._last_updated_timestamp_utc.isoformat()) + for detector_label, pose in self._poses_by_detector_label.items()] + target_poses: list[Pose] = [ + Pose( + target_id=str(target_id), + object_to_reference_matrix=pose, + solver_timestamp_utc_iso8601=self._last_updated_timestamp_utc.isoformat()) + for target_id, pose in self._poses_by_target_label.items()] + return detector_poses, target_poses + + def list_targets(self) -> list[Target]: + return self._targets + + def set_extrinsic_matrix( + self, 
+ detector_label: str, + transform_to_reference: Matrix4x4 + ) -> None: + self._extrinsics_by_detector_label[detector_label] = transform_to_reference + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def set_intrinsic_parameters( + self, + detector_label: str, + intrinsic_parameters: IntrinsicParameters + ) -> None: + self._intrinsics_by_detector_label[detector_label] = intrinsic_parameters + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def set_reference_target( + self, + target_id: str + ) -> None: + found: bool = False + for target_index, target in enumerate(self._targets): + if target.label == target_id: + self._targets[0], self._targets[target_index] = self._targets[target_index], self._targets[0] + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + found = True + break + if not found: + raise PoseSolverException(f"{target_id} was not found.") + + def set_targets( + self, + targets: list[Target] + ) -> None: + self._targets = targets + self._poses_by_target_label.clear() + self._poses_by_detector_label.clear() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + + def _calculate_reprojection_error_for_pose( + self, + ray_set: ..., + object_points_target: list[list[float]], + object_to_reference_translation: list[float], + object_to_reference_rotation_quaternion: list[float] + ) -> float: + object_to_reference_matrix = numpy.identity(4, dtype="float32") + # noinspection PyArgumentList + object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(object_to_reference_rotation_quaternion).as_matrix() + object_to_reference_matrix[0:3, 3] = object_to_reference_translation + object_points_reference = numpy.empty((len(object_points_target), 3), dtype="float32") + for object_point_index, object_point_target in enumerate(object_points_target): + object_point_reference = numpy.matmul( + object_to_reference_matrix, + 
[object_point_target[0], object_point_target[1], object_point_target[2], 1.0]) + object_points_reference[object_point_index, 0:3] = object_point_reference[0:3] + detector_label: str = ray_set.detector_label + reference_to_detector_matrix: Matrix4x4 = ray_set.detector_to_reference_matrix + reference_to_detector: numpy.ndarray = reference_to_detector_matrix.as_numpy_array() + # noinspection PyArgumentList + reference_to_detector_rotation_vector = \ + Rotation.as_rotvec(Rotation.from_matrix(reference_to_detector[0:3, 0:3])) + reference_to_detector_translation = reference_to_detector[0:3, 3] + camera_matrix = numpy.array(self._intrinsics_by_detector_label[detector_label].get_matrix(), dtype="float32") + camera_distortion_coefficients = \ + numpy.array(self._intrinsics_by_detector_label[detector_label].get_distortion_coefficients(), + dtype="float32") + project_points_result = cv2.projectPoints( + objectPoints=object_points_reference, + rvec=reference_to_detector_rotation_vector, + tvec=reference_to_detector_translation, + cameraMatrix=camera_matrix, + distCoeffs=camera_distortion_coefficients) + projected_points = project_points_result[0] + sum_reprojection_errors_squared: float = 0.0 + for point_index, image_point in enumerate(ray_set.image_points): + reprojection_error_for_point = \ + numpy.linalg.norm(projected_points[point_index, 0, 0:2] - image_point) + sum_reprojection_errors_squared += float(reprojection_error_for_point) ** 2 + mean_reprojection_errors_squared: float = sum_reprojection_errors_squared / len(object_points_target) + rms_reprojection_error = numpy.sqrt(mean_reprojection_errors_squared) + return rms_reprojection_error + + def _denoise_is_pose_pair_outlier( + self, + pose_a_object_to_reference_matrix: numpy.ndarray, + pose_b_object_to_reference_matrix: numpy.ndarray + ) -> bool: + + position_a = pose_a_object_to_reference_matrix[0:3, 3] + position_b = pose_b_object_to_reference_matrix[0:3, 3] + distance_millimeters = numpy.linalg.norm(position_a - 
position_b) + if distance_millimeters > self._configuration.denoise_outlier_maximum_distance: + return True + + orientation_a = pose_a_object_to_reference_matrix[0:3, 0:3] + orientation_b = pose_b_object_to_reference_matrix[0:3, 0:3] + rotation_a_to_b = numpy.matmul(orientation_a, numpy.linalg.inv(orientation_b)) + # noinspection PyArgumentList + angle_degrees = numpy.linalg.norm(Rotation.from_matrix(rotation_a_to_b).as_rotvec()) + if angle_degrees > self._configuration.denoise_outlier_maximum_angle_degrees: + return True + + return False + + # Take an average of recent poses, with detection and removal of outliers + # TODO: Currently not used - but it's probably a good feature to have + def _denoise_detector_to_reference_pose( + self, + object_label: str, + raw_poses: list[...] # In order, oldest to newest + ) -> ...: + most_recent_pose = raw_poses[-1] + max_storage_size: int = self._configuration.denoise_storage_size + filter_size: int = self._configuration.denoise_filter_size + if filter_size <= 1 or max_storage_size <= 1: + return most_recent_pose # trivial case + + # find a consistent range of recent indices + poses: list[...] 
= list(raw_poses) + poses.reverse() # now they are sorted so that the first element is most recent + required_starting_streak: int = self._configuration.denoise_required_starting_streak + starting_index: int = -1 # not yet known, we want to find this + if required_starting_streak <= 1: + starting_index = 0 # trivial case + else: + current_inlier_streak: int = 1 # comparison always involves a streak of size 1 or more + for pose_index in range(1, len(raw_poses)): + if self._denoise_is_pose_pair_outlier(poses[pose_index - 1], poses[pose_index]): + current_inlier_streak = 1 + continue + current_inlier_streak += 1 + if current_inlier_streak >= required_starting_streak: + starting_index = pose_index - required_starting_streak + 1 + break + + if starting_index < 0: + if len(poses) >= max_storage_size: # There appear to be enough poses, so data seems to be inconsistent + print( + "Warning: Can't find consistent pose streak. " + "Will use most recent raw pose for object " + object_label + ".") + return most_recent_pose + + poses_to_average: list[...] 
= [poses[starting_index]] + for pose_index in range(starting_index + 1, len(poses)): + if (not self._denoise_is_pose_pair_outlier(poses[pose_index - 1], poses[pose_index])) or \ + (not self._denoise_is_pose_pair_outlier(poses[starting_index], poses[pose_index])): + poses_to_average.append(poses[pose_index]) + if len(poses_to_average) > filter_size: + break + + translations = [list(pose.object_to_reference_matrix[0:3, 3]) + for pose in poses_to_average] + # noinspection PyArgumentList + orientations = [list(Rotation.from_matrix(pose.object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) + for pose in poses_to_average] + filtered_translation = MathUtils.average_vector(translations) + filtered_orientation = MathUtils.average_quaternion(orientations) + filtered_object_to_reference_matrix = numpy.identity(4, dtype="float32") + # noinspection PyArgumentList + filtered_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(filtered_orientation).as_matrix() + filtered_object_to_reference_matrix[0:3, 3] = filtered_translation + # return PoseData( + # object_label=object_label, + # object_to_reference_matrix=filtered_object_to_reference_matrix, + # ray_sets=most_recent_pose.ray_sets) + + def update(self) -> None: + if self._last_updated_timestamp_utc >= self._last_change_timestamp_utc: + return + + if len(self._targets) == 0: + return + + self._last_updated_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) + self._poses_by_detector_label.clear() + self._poses_by_target_label.clear() + + annotation_list_by_detector_label: dict[str, list[Annotation]] + annotation_list_by_detector_label = { + detector_label: detector_record.get_annotations(deep_copy=True) + for detector_label, detector_record in self._detector_records_by_detector_label.items()} + detector_labels: list[str] = list(self._detector_records_by_detector_label.keys()) + + for detector_label in detector_labels: + if detector_label not in self._intrinsics_by_detector_label: + # TODO: Output a 
suitable warning that no intrinsics have been received, but don't do it every frame + del annotation_list_by_detector_label[detector_label] + + reference_target: Target = self._targets[0] + for detector_label in detector_labels: + if detector_label in self._extrinsics_by_detector_label: + self._poses_by_detector_label[detector_label] = self._extrinsics_by_detector_label[detector_label] + else: + intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] + estimated: bool + reference_to_detector: Matrix4x4 | None + estimated, reference_to_detector = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotation_list_by_detector_label[detector_label], + landmarks=reference_target.landmarks, + detector_intrinsics=intrinsics) + if estimated: + detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( + numpy.linalg.inv(reference_to_detector.as_numpy_array())) + self._poses_by_detector_label[detector_label] = detector_to_reference + + # At the time of writing, each feature label can be used only by one target. + # So we can remove annotations whose feature labels match those of the reference_target + # to avoid unnecessary processing. 
+ reference_feature_labels: set[str] = set([landmark.feature_label for landmark in reference_target.landmarks]) + for detector_label in detector_labels: + indices_to_remove: list[int] = list() + for annotation_index, annotation in enumerate(annotation_list_by_detector_label[detector_label]): + if annotation.feature_label in reference_feature_labels: + indices_to_remove.append(annotation_index) + for annotation_index in reversed(indices_to_remove): + annotation_list_by_detector_label[detector_label].pop(annotation_index) + + # Convert annotations to rays + rays_by_feature_and_detector: dict[str, dict[str, Ray]] = dict() # indexed as [feature_label][detector_label] + for detector_label in detector_labels: + if detector_label not in self._poses_by_detector_label: + continue + annotations: list[Annotation] = annotation_list_by_detector_label[detector_label] + annotation_points: list[list[float]] = [[annotation.x_px, annotation.y_px] for annotation in annotations] + detector_to_reference: Matrix4x4 = self._poses_by_detector_label[detector_label] + intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] + ray_origin: list[float] = detector_to_reference.get_translation() + ray_directions = MathUtils.convert_detector_points_to_vectors( + points=annotation_points, + detector_intrinsics=intrinsics, + detector_to_reference_matrix=detector_to_reference) + feature_labels: list[str] = [annotation.feature_label for annotation in annotations] + # note: annotation_labels and ray_directions have a 1:1 correspondence by index + assert len(ray_directions) == len(feature_labels) + for feature_index, feature_label in enumerate(feature_labels): + if feature_label not in rays_by_feature_and_detector: + rays_by_feature_and_detector[feature_label] = dict() + rays_by_feature_and_detector[feature_label][detector_label] = Ray( + source_point=ray_origin, + direction=ray_directions[feature_index]) + + # intersect rays to find the 3D points for each feature, in 
reference coordinates + # If intersection is not possible, then still note that rays exist via standalone_ray_feature_labels + intersections_by_feature_label: dict[str, list[float]] = dict() # [feature_label][dimension_index] + feature_labels_with_rays_only: list[str] = list() + for feature_label, rays_by_detector_label in rays_by_feature_and_detector.items(): + intersection_result = MathUtils.closest_intersection_between_n_lines( + rays=list(rays_by_detector_label.values()), + maximum_distance=self._configuration.ray_intersection_maximum_distance) + if intersection_result.centroids.shape[0] == 0: + feature_labels_with_rays_only.append(feature_label) + break + intersections_by_feature_label[feature_label] = list(intersection_result.centroid().flatten()) + + # We estimate the pose of each target based on the calculated intersections + # and the rays projected from each detector + for target in self._targets: + if target.label == str(reference_target.label): + continue # everything is expressed relative to the reference, so it's a "known" coordinate system + feature_labels_in_target: list[str] = [landmark.feature_label for landmark in target.landmarks] + + target_feature_labels_with_intersections: list[str] = list() + target_feature_labels_with_rays: list[str] = list() + detector_labels_seeing_target: set[str] = set() + for target_feature_label in feature_labels_in_target: + if target_feature_label in intersections_by_feature_label: + target_feature_labels_with_intersections.append(target_feature_label) + if target_feature_label in feature_labels_with_rays_only: + target_feature_labels_with_rays.append(target_feature_label) + detector_labels_seeing_target |= set(rays_by_feature_and_detector[target_feature_label].keys()) + + if len(target_feature_labels_with_intersections) <= 0 and len(target_feature_labels_with_rays) <= 0: + continue # No information on which to base a pose + + detector_count_seeing_target: int = len(detector_labels_seeing_target) + if 
detector_count_seeing_target < self._configuration.minimum_detector_count or \ + detector_count_seeing_target <= 0: + continue + + one_detector_only: bool = (len(detector_labels_seeing_target) == 1) + if one_detector_only: + # Note: there cannot be any intersections in this case + detector_label: str = next(iter(detector_labels_seeing_target)) + intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] + estimated: bool + detected_to_detector_matrix4x4: Matrix4x4 + estimated, detected_to_detector_matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotation_list_by_detector_label[detector_label], + landmarks=target.landmarks, + detector_intrinsics=intrinsics) + if estimated: + detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() + detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() + detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector + self._poses_by_target_label[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) + else: + # Fill in the required variables for the customized iterative closest point + detected_known_points: list[list[float]] = [ + target.get_landmark_point(feature_label) + for feature_label in target_feature_labels_with_intersections] + reference_known_points: list[list[float]] = [ + intersections_by_feature_label[feature_label] + for feature_label in target_feature_labels_with_intersections] + detected_ray_points: list[list[float]] = [ + target.get_landmark_point(feature_label) + for feature_label in target_feature_labels_with_rays] + reference_rays: list[Ray] = list(itertools.chain.from_iterable([ + list(rays_by_feature_and_detector[feature_label].values()) + for feature_label in target_feature_labels_with_rays])) + iterative_closest_point_parameters = IterativeClosestPointParameters( + termination_iteration_count=self._configuration.icp_termination_iteration_count, + 
termination_delta_translation=self._configuration.icp_termination_translation, + termination_delta_rotation_radians=self._configuration.icp_termination_rotation_radians, + termination_mean_point_distance=self._configuration.icp_termination_mean_point_distance, + termination_rms_point_distance=self._configuration.icp_termination_rms_point_distance) + if len(target_feature_labels_with_intersections) >= 1: + initial_detected_to_reference_matrix = MathUtils.register_corresponding_points( + point_set_from=detected_known_points, + point_set_to=reference_known_points, + collinearity_do_check=False) + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( + source_known_points=detected_known_points, + target_known_points=reference_known_points, + source_ray_points=detected_ray_points, + target_rays=reference_rays, + initial_transformation_matrix=initial_detected_to_reference_matrix, + parameters=iterative_closest_point_parameters) + else: + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( + source_known_points=detected_known_points, + target_known_points=reference_known_points, + source_ray_points=detected_ray_points, + target_rays=reference_rays, + parameters=iterative_closest_point_parameters) + self._poses_by_target_label[target.label] = icp_output.source_to_target_matrix diff --git a/src/common/serialization.py b/src/common/serialization.py new file mode 100644 index 0000000..89ae192 --- /dev/null +++ b/src/common/serialization.py @@ -0,0 +1,345 @@ +from .status import MCTError +import abc +import hjson +import json +import os +from pydantic import BaseModel, Field, ValidationError +from typing import Any, Final, Callable, Literal, TypeVar, Union + + +class KeyValueSimpleAbstract(BaseModel, abc.ABC): + """ + Abstract class to represent a key-value pair. + Intended use: Setting parameter over network, serialization through JSON. 
+ """ + parsable_type: str + key: str = Field() + + +class KeyValueSimpleBool(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "bool" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: bool = Field() + + +class KeyValueSimpleFloat(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "float" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: float = Field() + + +class KeyValueSimpleInt(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "int" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: int = Field() + + +class KeyValueSimpleString(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "str" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: str = Field() + + +class KeyValueMetaAbstract(BaseModel, abc.ABC): + """ + Abstract class to represent a key-value pair, and additional data about the datum (range, description, etc) + Intended use: Getting parameter over network, serialization through JSON. + """ + parsable_type: str + key: str = Field() + + @abc.abstractmethod + def to_simple(self) -> KeyValueSimpleAbstract: ... 
+ + +class KeyValueMetaBool(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "bool" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: bool = Field() + + def to_simple(self) -> KeyValueSimpleBool: + return KeyValueSimpleBool(key=self.key, value=self.value) + + +class KeyValueMetaEnum(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "enum" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: str = Field() + allowable_values: list[str] = Field(default_factory=list) + + def to_simple(self) -> KeyValueSimpleString: + return KeyValueSimpleString(key=self.key, value=self.value) + + +class KeyValueMetaFloat(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "float" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: float = Field() + range_minimum: float = Field() + range_maximum: float = Field() + range_step: float = Field(default=1.0) + digit_count: int = Field(default=2) + + def to_simple(self) -> KeyValueSimpleFloat: + return KeyValueSimpleFloat(key=self.key, value=self.value) + + +class KeyValueMetaInt(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "int" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: int = Field() + range_minimum: int = Field() + range_maximum: int = Field() + range_step: int = Field(default=1) + + def to_simple(self) -> KeyValueSimpleInt: + return KeyValueSimpleInt(key=self.key, value=self.value) + + +# pydantic doesn't appear to handle very well typing's (TypeA, TypeB, ...) 
notation of a union +KeyValueSimpleAny = Union[ + KeyValueSimpleBool, + KeyValueSimpleString, + KeyValueSimpleFloat, + KeyValueSimpleInt] +KeyValueMetaAny = Union[ + KeyValueMetaBool, + KeyValueMetaEnum, + KeyValueMetaFloat, + KeyValueMetaInt] + + +DeserializableT = TypeVar('DeserializableT', bound='MCTDeserializable') + + +class MCTSerializationError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class MCTDeserializable(abc.ABC): + + @staticmethod + @abc.abstractmethod + def type_identifier() -> str: + pass + + @staticmethod + def deserialize_series_list( + series_dict: dict, + supported_types: dict[str, type[DeserializableT]] + ) -> list[DeserializableT]: + if "series" not in series_dict or not isinstance(series_dict["series"], list): + message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." + raise MCTSerializationError(message) + + output_series: list[DeserializableT] = list() + for parsable_dict in series_dict["series"]: + if not isinstance(parsable_dict, dict): + message: str = "series contained a non-dict element. Input is improperly formatted." + raise MCTSerializationError(message) + output_series.append(MCTDeserializable.deserialize_single( + single_dict=parsable_dict, + supported_types=supported_types)) + + return output_series + + @staticmethod + def deserialize_single( + single_dict: dict, + supported_types: dict[str, type[DeserializableT]] + ) -> DeserializableT: + if "parsable_type" not in single_dict or not isinstance(single_dict["parsable_type"], str): + message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." + raise MCTSerializationError(message) from None + + parsable_type: str = single_dict["parsable_type"] + if parsable_type not in supported_types: + message: str = "parsable_type did not match any expected value. Input is improperly formatted." 
+ raise MCTSerializationError(message) + + supported_type: type[DeserializableT] = supported_types[parsable_type] + deserializable: DeserializableT + try: + deserializable = supported_type(**single_dict) + except ValidationError as e: + raise MCTSerializationError( + f"A deserializable of type {supported_type} was ill-formed: {str(e)}") from None + return deserializable + + +class IOUtils: + """ + static class for IO-related utility functions. + """ + + PathType = Literal["filepath", "path"] + + @staticmethod + def exists( + path: str, + pathtype: PathType, + on_error_for_user: Callable[[str], Any], + on_error_for_dev: Callable[[str], Any], + create_path: bool = False + ) -> bool: + """ + :param path: Location of target + :param pathtype: "filepath" (target is file) or "path" (target is directory) + :param on_error_for_user: + Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. + :param on_error_for_dev: + Function to supply with a developer-friendly string (not by end-user) in the event of an error. + :param create_path: + If pathtype == "path" and this is True, try to create the path if it does not exist. Default False. + :return: True if there is a file/path at the indicated path, otherwise False. + """ + exists: bool = os.path.exists(path) + if pathtype == "filepath": + if exists and not os.path.isfile(path): + on_error_for_user( + "Filepath location exists but is not a file. " + "Most likely a directory exists at that location, " + "and it needs to be manually removed.") + on_error_for_dev( + f"Specified filepath location {path} exists but is not a file.") + return False + elif pathtype == "path": + if exists and not os.path.isdir(path): + on_error_for_user( + "Path location exists but is not a path. 
" + "Most likely a file exists at that location, " + "and it needs to be manually removed.") + on_error_for_dev( + f"Specified path location {path} exists but is not a path.") + return False + if create_path and not exists: + os.makedirs(name=path, exist_ok=True) + exists = os.path.exists(path) + return exists + + @staticmethod + def hjson_read( + filepath: str, + on_error_for_user: Callable[[str], Any], + on_error_for_dev: Callable[[str], Any] + ) -> dict | None: + """ + :param filepath: + :param on_error_for_user: + Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. + :param on_error_for_dev: + Function to supply with a developer-friendly string (not by end-user) in the event of an error. + :return: Dictionary representing the JSON data if successful, otherwise None + """ + if not IOUtils.exists( + path=filepath, + pathtype="filepath", + on_error_for_user=on_error_for_user, + on_error_for_dev=on_error_for_dev + ): + return None + json_dict: dict + try: + with open(filepath, 'r', encoding='utf-8') as input_file: + json_dict = hjson.load(input_file) + except OSError as e: + on_error_for_user("An unexpected file I/O error happened while reading a file.") + on_error_for_dev(str(e)) + return None + return json_dict + + @staticmethod + def json_read( + filepath: str, + on_error_for_user: Callable[[str], Any], + on_error_for_dev: Callable[[str], Any] + ) -> dict | None: + """ + Generally it is better to use hjson_read instead, which is less strict and supports comments. + This function currently forwards to hjson_ready anyway. 
+ """ + return IOUtils.hjson_read( + filepath=filepath, + on_error_for_user=on_error_for_user, + on_error_for_dev=on_error_for_dev) + + @staticmethod + def json_write( + filepath: str, + json_dict: dict, + on_error_for_user: Callable[[str], Any], + on_error_for_dev: Callable[[str], Any], + ignore_none: bool = False, + indent: int = 4 + ) -> bool: + """ + :param filepath: + :param json_dict: + :param on_error_for_user: + Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. + :param on_error_for_dev: + Function to supply with a developer-friendly string (not by end-user) in the event of an error. + :param ignore_none: Explicitly ignore (don't write) any key-value pairs where the value is None. Default False. + :param indent: Width (spaces) of each indentation level. Default 4. + :return: True if the file was written, otherwise False. + """ + filepath = os.path.join(filepath) + path = os.path.dirname(filepath) + if not IOUtils.exists( + path=path, + pathtype="path", + on_error_for_user=on_error_for_user, + on_error_for_dev=on_error_for_dev, + create_path=True + ): + return False + if ignore_none: + json_dict = IOUtils._remove_all_none_from_dict_recursive(json_dict) + try: + with open(filepath, 'w', encoding='utf-8') as output_file: + json.dump(json_dict, output_file, sort_keys=False, indent=indent) + except OSError as e: + on_error_for_user("An unexpected file I/O error happened while writing a file.") + on_error_for_dev(str(e)) + return False + return True + + @staticmethod + def _remove_all_none_from_dict_recursive( + input_dict: dict + ) -> dict: + output_dict = dict(input_dict) + for key in output_dict.keys(): + if isinstance(output_dict[key], dict): + output_dict[key] = IOUtils._remove_all_none_from_dict_recursive(output_dict[key]) + elif output_dict[key] is None: + del output_dict[key] + return output_dict diff --git a/src/common/standard_resolutions.py b/src/common/standard_resolutions.py deleted file mode 
100644 index f65081b..0000000 --- a/src/common/standard_resolutions.py +++ /dev/null @@ -1,25 +0,0 @@ -from .structures import ImageResolution -from typing import Final - - -class StandardResolutions: - RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) - RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) - RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) - RES_1024X768: Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) - RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) - RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) - RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=1024) - RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) - - @staticmethod - def as_list(): - return [ - StandardResolutions.RES_640X360, - StandardResolutions.RES_640X480, - StandardResolutions.RES_800X600, - StandardResolutions.RES_1024X768, - StandardResolutions.RES_1280X720, - StandardResolutions.RES_1280X800, - StandardResolutions.RES_1280X1024, - StandardResolutions.RES_1920X1080] diff --git a/src/common/status_message_source.py b/src/common/status.py similarity index 75% rename from src/common/status_message_source.py rename to src/common/status.py index 928a70d..fe34e1b 100644 --- a/src/common/status_message_source.py +++ b/src/common/status.py @@ -1,13 +1,45 @@ -from src.common.structures import \ - SeverityLabel, \ - StatusMessage import datetime +from enum import StrEnum import logging +from pydantic import BaseModel, Field +from typing import Final + logger = logging.getLogger(__name__) +class MCTError(Exception): + def __init__(self, *args): + super().__init__(*args) + + +class SeverityLabel(StrEnum): + DEBUG = "debug" + INFO = "info" + WARNING = "warning" + ERROR = "error" + CRITICAL = "critical" + + +SEVERITY_LABEL_TO_INT: Final[dict[SeverityLabel, int]] = { + SeverityLabel.DEBUG: logging.DEBUG, + 
SeverityLabel.INFO: logging.INFO, + SeverityLabel.WARNING: logging.WARNING, + SeverityLabel.ERROR: logging.ERROR, + SeverityLabel.CRITICAL: logging.CRITICAL} + + +class StatusMessage(BaseModel): + source_label: str = Field() + severity: SeverityLabel = Field() + message: str + timestamp_utc_iso8601: str + + class StatusMessageSource: + """ + Class to facilitate the management of status messages sent between components + """ _source_label: str @@ -34,13 +66,13 @@ def add_status_subscriber( message: str = f"{subscriber_label} is now listening for status messages." self.enqueue_status_message( source_label=self._source_label, - severity="debug", + severity=SeverityLabel.DEBUG, message=message) else: message: str = f"{subscriber_label} is already in status message outboxes." self.enqueue_status_message( source_label=self._source_label, - severity="error", + severity=SeverityLabel.ERROR, message=message) def get_source_label(self) -> str: @@ -56,7 +88,7 @@ def enqueue_status_message( if not source_label: source_label = self._source_label if not timestamp_utc_iso8601: - timestamp_utc_iso8601 = datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601 = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() elif isinstance(timestamp_utc_iso8601, datetime.datetime): timestamp_utc_iso8601 = timestamp_utc_iso8601.isoformat() message: StatusMessage = StatusMessage( diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py deleted file mode 100644 index f721c23..0000000 --- a/src/common/structures/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -from .capture_format import CaptureFormat -from .charuco_board_specification import CharucoBoardSpecification -from .component_role_label import \ - ComponentRoleLabel, \ - COMPONENT_ROLE_LABEL_DETECTOR, \ - COMPONENT_ROLE_LABEL_POSE_SOLVER -from .corner_refinement import \ - CornerRefinementMethod, \ - CORNER_REFINEMENT_METHOD_NONE, \ - CORNER_REFINEMENT_METHOD_SUBPIX, \ - 
CORNER_REFINEMENT_METHOD_CONTOUR,\ - CORNER_REFINEMENT_METHOD_APRILTAG, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT -from .detector_frame import DetectorFrame -from .image_resolution import ImageResolution -from .intrinsic_calibration import \ - IntrinsicCalibration, \ - IntrinsicCalibrationFrameResult -from .intrinsic_parameters import IntrinsicParameters -from .key_value_structures import \ - KeyValueSimpleAbstract, \ - KeyValueSimpleAny, \ - KeyValueSimpleBool, \ - KeyValueSimpleString, \ - KeyValueSimpleFloat, \ - KeyValueSimpleInt, \ - KeyValueMetaAbstract, \ - KeyValueMetaAny, \ - KeyValueMetaBool, \ - KeyValueMetaEnum, \ - KeyValueMetaFloat, \ - KeyValueMetaInt, \ - key_value_meta_to_simple -from .marker_corner_image_point import MarkerCornerImagePoint -from .marker_corners import MarkerCorners -from .marker_definition import MarkerDefinition -from .marker_snapshot import MarkerSnapshot -from .matrix4x4 import Matrix4x4 -from .mct_parsable import MCTParsable -from .pose import Pose -from .pose_solver_frame import PoseSolverFrame -from .pose_solver_status import PoseSolverStatus -from .status_message import \ - SeverityLabel, \ - SEVERITY_LABEL_DEBUG, \ - SEVERITY_LABEL_INFO, \ - SEVERITY_LABEL_WARNING, \ - SEVERITY_LABEL_ERROR, \ - SEVERITY_LABEL_CRITICAL, \ - SEVERITY_LABEL_TO_INT, \ - StatusMessage -from .target import \ - Marker, \ - TargetBase, \ - TargetBoard, \ - TargetMarker -from .vec3 import Vec3 diff --git a/src/common/structures/aruco_board_specification.py b/src/common/structures/aruco_board_specification.py deleted file mode 100644 index b4e5c49..0000000 --- a/src/common/structures/aruco_board_specification.py +++ /dev/null @@ -1,28 +0,0 @@ -from pydantic import BaseModel, validator -from .vec3 import Vec3 -import json, hjson - -class BoardMarker(BaseModel): - marker_id: int - points: list[Vec3] - - @validator - def check_points_length(cls, values): - points = values.get('points') - if 
points is not None and len(points) != 4: - raise ValueError("The list of points must have exactly four elements") - return values - - -class Board(BaseModel): - board_markers: list[BoardMarker] - -def read_file(input_filepath: str) -> Board: - with open(input_filepath, 'r') as file: - data = hjson.load(file) - return Board(**data) - - -def write_file(output_filepath: str, output_board: Board) -> None: - with open(output_filepath, 'w') as file: - json.dump(output_board.as_dict(), file, indent=4) diff --git a/src/common/structures/capture_format.py b/src/common/structures/capture_format.py deleted file mode 100644 index 3798735..0000000 --- a/src/common/structures/capture_format.py +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Literal - - -CaptureFormat = Literal[".png", ".jpg"] diff --git a/src/common/structures/charuco_board_specification.py b/src/common/structures/charuco_board_specification.py deleted file mode 100644 index a8593bd..0000000 --- a/src/common/structures/charuco_board_specification.py +++ /dev/null @@ -1,86 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Any, Tuple -import cv2.aruco - - -class CharucoBoardSpecification(BaseModel): - dictionary_name: str = Field(default="DICT_4X4_100") - square_count_x: int = Field(default=8) - square_count_y: int = Field(default=10) - square_size_px: int = Field(default=800) - marker_size_px: int = Field(default=400) - px_per_mm: float = Field(default=40) - - def aruco_dictionary(self) -> Any: # type cv2.aruco.Dictionary - if self.dictionary_name != "DICT_4X4_100": - raise NotImplementedError("Only DICT_4X4_100 is currently implemented") - aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - return aruco_dictionary - - def size_px(self) -> Tuple[float, float]: - board_size_x_px = self.square_count_x * self.square_size_px - board_size_y_px = self.square_count_y * self.square_size_px - return board_size_x_px, board_size_y_px - - def size_mm(self) -> Tuple[float, float]: 
- board_size_x_mm = self.square_count_x * self.square_size_px / self.px_per_mm - board_size_y_mm = self.square_count_y * self.square_size_px / self.px_per_mm - return board_size_x_mm, board_size_y_mm - - def create_board(self) -> Any: # type cv2.aruco.CharucoBoard - charuco_board = cv2.aruco.CharucoBoard_create( - self.square_count_x, - self.square_count_y, - self.square_size_px, - self.marker_size_px, - self.aruco_dictionary()) - return charuco_board - - def get_marker_center_points(self) -> list[list[float]]: - """ - Note that the coordinates assume (based on portrait orientation): - origin: at bottom-left of board - x-axis: goes right - y-axis: goes up the page - z-axis: comes out of the image and toward the viewer - """ - points = [] - for y in range(self.square_count_y): - for x in range(self.square_count_x): - if (x + y) % 2 == 1: # Only add the points for the white squares - point_x = (x + 0.5) * self.square_size_px / self.px_per_mm - point_y = (self.square_count_y - y - 0.5) * self.square_size_px / self.px_per_mm - points.append([point_x, point_y, 0.0]) - return points - - def get_marker_corner_points(self) -> list[list[float]]: - """ - Note that the coordinates assume the same axes as get_marker_center_points, - but the origin is in the center of the board, not the bottom-left corner. 
- """ - points = [] - marker_size_mm: float = self.marker_size_px / self.px_per_mm - square_size_mm: float = self.square_size_px / self.px_per_mm - for y_sq in range(self.square_count_y): - for x_sq in range(self.square_count_x): - if (x_sq + y_sq) % 2 == 1: # Only add the points for the white squares - x_sq_centered: float = x_sq - (self.square_count_x / 2.0) - y_sq_centered: float = y_sq - (self.square_count_y / 2.0) - for corner_index in range(0, 4): - x_mm: float = (x_sq_centered + 0.5) * square_size_mm - if corner_index == 0 or corner_index == 3: - x_mm -= (marker_size_mm / 2.0) - else: - x_mm += (marker_size_mm / 2.0) - y_mm: float = (-(y_sq_centered + 0.5)) * square_size_mm - if corner_index == 0 or corner_index == 1: - y_mm += (marker_size_mm / 2.0) - else: - y_mm -= (marker_size_mm / 2.0) - z_mm: float = 0.0 - points.append([x_mm, y_mm, z_mm]) - return points - - def get_marker_ids(self) -> list[int]: - num_markers = self.square_count_x * self.square_count_y // 2 - return list(range(num_markers)) diff --git a/src/common/structures/component_role_label.py b/src/common/structures/component_role_label.py deleted file mode 100644 index edbcf17..0000000 --- a/src/common/structures/component_role_label.py +++ /dev/null @@ -1,5 +0,0 @@ -from typing import Final, Literal - -COMPONENT_ROLE_LABEL_DETECTOR: Final[str] = "detector" -COMPONENT_ROLE_LABEL_POSE_SOLVER: Final[str] = "pose_solver" -ComponentRoleLabel = Literal["detector", "pose_solver"] diff --git a/src/common/structures/corner_refinement.py b/src/common/structures/corner_refinement.py deleted file mode 100644 index 9e6a1bd..0000000 --- a/src/common/structures/corner_refinement.py +++ /dev/null @@ -1,20 +0,0 @@ -import cv2 -from typing import Final, Literal - -CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] -CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' -CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' -CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' 
-CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' - -CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { - "NONE": cv2.aruco.CORNER_REFINE_NONE, - "SUBPIX": cv2.aruco.CORNER_REFINE_SUBPIX, - "CONTOUR": cv2.aruco.CORNER_REFINE_CONTOUR, - "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} - -CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, CornerRefinementMethod] = { - cv2.aruco.CORNER_REFINE_NONE: "NONE", - cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", - cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", - cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} diff --git a/src/common/structures/detector_frame.py b/src/common/structures/detector_frame.py deleted file mode 100644 index aef2b36..0000000 --- a/src/common/structures/detector_frame.py +++ /dev/null @@ -1,14 +0,0 @@ -from .image_resolution import ImageResolution -from .marker_snapshot import MarkerSnapshot -import datetime -from pydantic import BaseModel, Field - - -class DetectorFrame(BaseModel): - detected_marker_snapshots: list[MarkerSnapshot] | None = Field() - rejected_marker_snapshots: list[MarkerSnapshot] | None = Field() - timestamp_utc_iso8601: str = Field() - image_resolution: ImageResolution = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/image_resolution.py b/src/common/structures/image_resolution.py deleted file mode 100644 index 92c4a0a..0000000 --- a/src/common/structures/image_resolution.py +++ /dev/null @@ -1,42 +0,0 @@ -from pydantic import BaseModel, Field - - -class ImageResolution(BaseModel): - x_px: int = Field() - y_px: int = Field() - - def __eq__(self, other) -> bool: - if type(self) is not type(other): - return False - return \ - self.x_px == other.x_px and \ - self.y_px == other.y_px - - def __hash__(self) -> int: - return hash(str(self)) - - def __lt__(self, other): - if not isinstance(other, ImageResolution): - raise ValueError() - if self.x_px < 
other.x_px: - return True - elif self.x_px > other.x_px: - return False - elif self.y_px < other.y_px: - return True - else: - return False - - def __str__(self): - return f"{self.x_px}x{self.y_px}" - - @staticmethod - def from_str(in_str: str) -> 'ImageResolution': - if 'x' not in in_str: - raise ValueError("in_str is expected to contain delimiter 'x'.") - parts: list[str] = in_str.split('x') - if len(parts) > 2: - raise ValueError("in_str is expected to contain exactly one 'x'.") - x_px = int(parts[0]) - y_px = int(parts[1]) - return ImageResolution(x_px=x_px, y_px=y_px) diff --git a/src/common/structures/intrinsic_calibration.py b/src/common/structures/intrinsic_calibration.py deleted file mode 100644 index 5388aec..0000000 --- a/src/common/structures/intrinsic_calibration.py +++ /dev/null @@ -1,24 +0,0 @@ -from .image_resolution import ImageResolution -from .intrinsic_parameters import IntrinsicParameters -from .key_value_structures import KeyValueSimpleAny -from .vec3 import Vec3 -from pydantic import BaseModel, Field, SerializeAsAny - - -class IntrinsicCalibrationFrameResult(BaseModel): - image_identifier: str = Field() - translation: Vec3 = Field() - rotation: Vec3 = Field() - translation_stdev: Vec3 = Field() - rotation_stdev: Vec3 = Field() - reprojection_error: float = Field() - - -class IntrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - image_resolution: ImageResolution = Field() - reprojection_error: float = Field() - calibrated_values: IntrinsicParameters = Field() - calibrated_stdevs: list[float] = Field() - marker_parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() - frame_results: list[IntrinsicCalibrationFrameResult] = Field(default=list()) diff --git a/src/common/structures/intrinsic_parameters.py b/src/common/structures/intrinsic_parameters.py deleted file mode 100644 index b510a7e..0000000 --- a/src/common/structures/intrinsic_parameters.py +++ /dev/null @@ -1,70 +0,0 @@ -from pydantic import BaseModel, Field -import 
math - - -class IntrinsicParameters(BaseModel): - """ - Camera intrinsic parameters (focal length, optical center, distortion coefficients). - See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html - See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 - """ - - focal_length_x_px: float = Field() - focal_length_y_px: float = Field() - optical_center_x_px: float = Field() - optical_center_y_px: float = Field() - - radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV - - tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV - - def as_array(self) -> list[float]: - return_value: list[float] = [ - self.focal_length_x_px, - self.focal_length_y_px, - self.optical_center_x_px, - self.optical_center_y_px] - return_value += self.get_distortion_coefficients() - return return_value - - def get_matrix(self) -> list[list[float]]: - """calibration matrix expected by OpenCV in some operations""" - return \ - [[self.focal_length_x_px, 0.0, self.optical_center_x_px], - [0.0, self.focal_length_y_px, self.optical_center_y_px], - [0.0, 0.0, 1.0]] - - def get_distortion_coefficients(self) -> list[float]: - """ - Distortion coefficients in array format expected by OpenCV in some operations. 
- See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d - calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with - """ - coefficients: list[float] = [ - self.radial_distortion_coefficients[0], - self.radial_distortion_coefficients[1], - self.tangential_distortion_coefficients[0], - self.tangential_distortion_coefficients[1]] - coefficients += self.radial_distortion_coefficients[2:] - return coefficients - - @staticmethod - def generate_zero_parameters( - resolution_x_px: int, - resolution_y_px: int, - fov_x_degrees: float = 45.0, - fov_y_degrees: float = 45.0 - ) -> "IntrinsicParameters": - optical_center_x_px: int = int(round(resolution_x_px/2.0)) - fov_x_radians: float = fov_x_degrees * math.pi / 180.0 - focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) - optical_center_y_px: int = int(round(resolution_y_px/2.0)) - fov_y_radians: float = fov_y_degrees * math.pi / 180.0 - focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) - return IntrinsicParameters( - focal_length_x_px=focal_length_x_px, - focal_length_y_px=focal_length_y_px, - optical_center_x_px=optical_center_x_px, - optical_center_y_px=optical_center_y_px, - radial_distortion_coefficients=[0.0, 0.0, 0.0], - tangential_distortion_coefficients=[0.0, 0.0]) diff --git a/src/common/structures/key_value_structures.py b/src/common/structures/key_value_structures.py deleted file mode 100644 index 8e8cf9e..0000000 --- a/src/common/structures/key_value_structures.py +++ /dev/null @@ -1,135 +0,0 @@ -import abc -from pydantic import BaseModel, Field -from typing import Final, Literal, Union - - -class KeyValueSimpleAbstract(BaseModel, abc.ABC): - """ - Abstract class to represent a key-value pair. - Intended use: Setting parameter over network, serialization through JSON. 
- """ - parsable_type: str - key: str = Field() - - -class KeyValueSimpleBool(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "bool" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: bool = Field() - - -class KeyValueSimpleFloat(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "float" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: float = Field() - - -class KeyValueSimpleInt(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "int" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: int = Field() - - -class KeyValueSimpleString(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "str" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: str = Field() - - -class KeyValueMetaAbstract(BaseModel, abc.ABC): - """ - Abstract class to represent a key-value pair, and additional data about the datum (range, description, etc) - Intended use: Getting parameter over network, serialization through JSON. - """ - parsable_type: str - key: str = Field() - - @abc.abstractmethod - def to_simple(self) -> KeyValueSimpleAbstract: ... 
- - -class KeyValueMetaBool(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "bool" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: bool = Field() - - def to_simple(self) -> KeyValueSimpleBool: - return KeyValueSimpleBool(key=self.key, value=self.value) - - -class KeyValueMetaEnum(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "enum" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: str = Field() - allowable_values: list[str] = Field(default_factory=list) - - def to_simple(self) -> KeyValueSimpleString: - return KeyValueSimpleString(key=self.key, value=self.value) - - -class KeyValueMetaFloat(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "float" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: float = Field() - range_minimum: float = Field() - range_maximum: float = Field() - range_step: float = Field(default=1.0) - digit_count: int = Field(default=2) - - def to_simple(self) -> KeyValueSimpleFloat: - return KeyValueSimpleFloat(key=self.key, value=self.value) - - -class KeyValueMetaInt(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "int" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: int = Field() - range_minimum: int = Field() - range_maximum: int = Field() - range_step: int = Field(default=1) - - def to_simple(self) -> KeyValueSimpleInt: - return KeyValueSimpleInt(key=self.key, value=self.value) - - -# pydantic doesn't appear to handle very well typing's (TypeA, TypeB, ...) 
notation of a union -KeyValueSimpleAny = Union[ - KeyValueSimpleBool, - KeyValueSimpleString, - KeyValueSimpleFloat, - KeyValueSimpleInt] -KeyValueMetaAny = Union[ - KeyValueMetaBool, - KeyValueMetaEnum, - KeyValueMetaFloat, - KeyValueMetaInt] - - -def key_value_meta_to_simple( - key_value_meta_list: list[KeyValueMetaAny] -) -> list[KeyValueSimpleAny]: - return [key_value_meta.to_simple() for key_value_meta in key_value_meta_list] diff --git a/src/common/structures/marker_corner_image_point.py b/src/common/structures/marker_corner_image_point.py deleted file mode 100644 index d3a3be0..0000000 --- a/src/common/structures/marker_corner_image_point.py +++ /dev/null @@ -1,6 +0,0 @@ -from pydantic import BaseModel, Field - - -class MarkerCornerImagePoint(BaseModel): - x_px: float = Field() - y_px: float = Field() diff --git a/src/common/structures/marker_corners.py b/src/common/structures/marker_corners.py deleted file mode 100644 index a5d6a0c..0000000 --- a/src/common/structures/marker_corners.py +++ /dev/null @@ -1,20 +0,0 @@ -import datetime - -# TODO: Remove this in favour of DetectorFrame or another data structure -class MarkerCorners: - detector_label: str - marker_id: int - points: list[list[float]] - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp diff --git a/src/common/structures/marker_definition.py b/src/common/structures/marker_definition.py deleted file mode 100644 index 96f8f31..0000000 --- a/src/common/structures/marker_definition.py +++ /dev/null @@ -1,28 +0,0 @@ -import base64 -import numpy -from pydantic import BaseModel, Field - - -class MarkerDefinition(BaseModel): - label: str = Field() - representation_single_base64: str = Field() # representation from a single rotation only - - def representation_all_base64(self): - 
""" - OpenCV ArUco expects to receive all possible rotations of a marker. We generate these programmatically. - """ - representation_single_bytes: bytes = base64.b64decode(self.representation_single_base64) - representation_single_list: list[bool] = list(representation_single_bytes) - representation_single_matrix: numpy.ndarray = numpy.asarray( - a=representation_single_list, - dtype=bool) - marker_side_length_bits: int = int(numpy.sqrt(len(representation_single_list))) - representation_single_matrix = numpy.reshape( - a=representation_single_matrix, - newshape=(marker_side_length_bits, marker_side_length_bits)) - representation_all_list: list[bool] = list(representation_single_matrix.flatten()) - for i in range(3): - representation_single_matrix = numpy.rot90(representation_single_matrix) - representation_all_list += list(representation_single_matrix.flatten()) - representation_all_bytes: bytes = bytes(representation_all_list) - return base64.b64encode(representation_all_bytes) diff --git a/src/common/structures/marker_snapshot.py b/src/common/structures/marker_snapshot.py deleted file mode 100644 index 849297c..0000000 --- a/src/common/structures/marker_snapshot.py +++ /dev/null @@ -1,7 +0,0 @@ -from .marker_corner_image_point import MarkerCornerImagePoint -from pydantic import BaseModel, Field - - -class MarkerSnapshot(BaseModel): - label: str = Field() - corner_image_points: list[MarkerCornerImagePoint] = Field() diff --git a/src/common/structures/matrix4x4.py b/src/common/structures/matrix4x4.py deleted file mode 100644 index 84faa8f..0000000 --- a/src/common/structures/matrix4x4.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy -from pydantic import BaseModel, Field - - -def _identity_values() -> list[float]: - return \ - [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] - - -class Matrix4x4(BaseModel): - values: list[float] = Field(default_factory=_identity_values) - - def as_numpy_array(self): - a = self.values - return 
numpy.asarray( - [[a[0], a[1], a[2], a[3]], - [a[4], a[5], a[6], a[7]], - [a[8], a[9], a[10], a[11]], - [a[12], a[13], a[14], a[15]]]) - - def __getitem__(self, idx: tuple[int, int]) -> float: - if isinstance(idx, tuple): - return self.values[(idx[0]*4) + idx[1]] - else: - raise ValueError("Unexpected index. Expected tuple index [row,col].") - - def __mul__(self, other) -> 'Matrix4x4': - if not isinstance(other, Matrix4x4): - raise ValueError - result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) - return Matrix4x4(values=list(result_numpy_array.flatten())) - - def get_translation(self) -> list[float]: - """ - Return a vector of [x,y,z] representing translation. - """ - a = self.values - return [a[3], a[7], a[11]] - - def inverse(self) -> 'Matrix4x4': - inv_numpy_array = numpy.linalg.inv(self.as_numpy_array()) - return Matrix4x4.from_numpy_array(inv_numpy_array) - - @staticmethod - def from_raw_values( - v00, v01, v02, v03, - v10, v11, v12, v13, - v20, v21, v22, v23, - v30, v31, v32, v33 - ) -> 'Matrix4x4': - return Matrix4x4(values=[ - v00, v01, v02, v03, - v10, v11, v12, v13, - v20, v21, v22, v23, - v30, v31, v32, v33]) - - @staticmethod - def from_list( - value_list: list[float] - ) -> 'Matrix4x4': - if len(value_list) != 16: - raise ValueError(f"Expected a list of 16 float. Got {str(value_list)}.") - return Matrix4x4(values=list(value_list)) - - @staticmethod - def from_numpy_array( - value_array: numpy.ndarray - ) -> 'Matrix4x4': - if len(value_array) != 4: - raise ValueError(f"Expected input array to have 4 rows. Got {len(value_array)}.") - for i in range(0, len(value_array)): - if len(value_array[i]) != 4: - raise ValueError(f"Expected input row {i} to have 4 col. 
Got {len(value_array[i])}.") - return Matrix4x4(values=list(value_array.flatten())) diff --git a/src/common/structures/mct_parsable.py b/src/common/structures/mct_parsable.py deleted file mode 100644 index d6400f1..0000000 --- a/src/common/structures/mct_parsable.py +++ /dev/null @@ -1,56 +0,0 @@ -from src.common.exceptions import MCTParsingError -import abc -from pydantic import ValidationError -from typing import TypeVar - - -ParsableDynamic = TypeVar('ParsableDynamic', bound='MCTParsable') - - -class MCTParsable(abc.ABC): - - @staticmethod - @abc.abstractmethod - def parsable_type_identifier() -> str: - pass - - @staticmethod - def parse_dynamic_series_list( - parsable_series_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> list[ParsableDynamic]: - if "series" not in parsable_series_dict or not isinstance(parsable_series_dict["series"], list): - message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." - raise MCTParsingError(message) - - output_series: list[ParsableDynamic] = list() - for parsable_dict in parsable_series_dict["series"]: - if not isinstance(parsable_dict, dict): - message: str = "series contained a non-dict element. Input is improperly formatted." - raise MCTParsingError(message) - output_series.append(MCTParsable.parse_dynamic_single( - parsable_dict=parsable_dict, - supported_types=supported_types)) - - return output_series - - @staticmethod - def parse_dynamic_single( - parsable_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> ParsableDynamic: - if "parsable_type" not in parsable_dict or not isinstance(parsable_dict["parsable_type"], str): - message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." 
- raise MCTParsingError(message) from None - - for supported_type in supported_types: - if parsable_dict["parsable_type"] == supported_type.parsable_type_identifier(): - request: ParsableDynamic - try: - request = supported_type(**parsable_dict) - except ValidationError as e: - raise MCTParsingError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None - return request - - message: str = "parsable_type did not match any expected value. Input is improperly formatted." - raise MCTParsingError(message) diff --git a/src/common/structures/pose.py b/src/common/structures/pose.py deleted file mode 100644 index b43990b..0000000 --- a/src/common/structures/pose.py +++ /dev/null @@ -1,10 +0,0 @@ -from .matrix4x4 import Matrix4x4 -import datetime -from pydantic import BaseModel, Field -import uuid - - -class Pose(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - solver_timestamp_utc_iso8601: str = Field() diff --git a/src/common/structures/pose_solver_frame.py b/src/common/structures/pose_solver_frame.py deleted file mode 100644 index f17097e..0000000 --- a/src/common/structures/pose_solver_frame.py +++ /dev/null @@ -1,12 +0,0 @@ -from .pose import Pose -import datetime -from pydantic import BaseModel, Field - - -class PoseSolverFrame(BaseModel): - detector_poses: list[Pose] | None = Field() - target_poses: list[Pose] | None = Field() - timestamp_utc_iso8601: str = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/pose_solver_status.py b/src/common/structures/pose_solver_status.py deleted file mode 100644 index d8bc09d..0000000 --- a/src/common/structures/pose_solver_status.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Final -from enum import IntEnum - - -class PoseSolverStatus: - - class Solve(IntEnum): - STOPPED: Final[int] = 0 - RUNNING: Final[int] = 1 - FAILURE: Final[int] = 2 - - solve_status: Solve - 
solve_errors: list[str] - - def __init__(self): - self.solve_status = PoseSolverStatus.Solve.STOPPED - self.solve_errors = list() - - def in_runnable_state(self): - return self.solve_status == PoseSolverStatus.Solve.RUNNING diff --git a/src/common/structures/status_message.py b/src/common/structures/status_message.py deleted file mode 100644 index 76eff42..0000000 --- a/src/common/structures/status_message.py +++ /dev/null @@ -1,25 +0,0 @@ -import logging -from pydantic import BaseModel, Field -from typing import Final, Literal - - -SEVERITY_LABEL_DEBUG: Final[str] = "debug" -SEVERITY_LABEL_INFO: Final[str] = "info" -SEVERITY_LABEL_WARNING: Final[str] = "warning" -SEVERITY_LABEL_ERROR: Final[str] = "error" -SEVERITY_LABEL_CRITICAL: Final[str] = "critical" - -SeverityLabel = Literal["debug", "info", "warning", "error", "critical"] -SEVERITY_LABEL_TO_INT: Final[dict[SeverityLabel, int]] = { - "debug": logging.DEBUG, - "info": logging.INFO, - "warning": logging.WARNING, - "error": logging.ERROR, - "critical": logging.CRITICAL} - - -class StatusMessage(BaseModel): - source_label: str = Field() - severity: SeverityLabel = Field() - message: str - timestamp_utc_iso8601: str diff --git a/src/common/structures/target.py b/src/common/structures/target.py deleted file mode 100644 index 3ac2833..0000000 --- a/src/common/structures/target.py +++ /dev/null @@ -1,90 +0,0 @@ -import abc -import numpy -from pydantic import BaseModel, Field, PrivateAttr - - -class Marker(BaseModel): - marker_id: str = Field() - marker_size: float | None = Field(default=None) - points: list[list[float]] | None = Field(default=None) - - # TODO: During validation, make sure either marker_size or points is defined, but not both. 
- - def get_marker_size(self) -> float: - if self.marker_size is None: - if self.points is None or len(self.points) < 2: - raise RuntimeError("TargetMarker defined with neither marker_size nor enough points.") - marker_size_sum: float = 0.0 - for point_index in range(0, len(self.points)): - point_a: numpy.ndarray = numpy.asarray(self.points[point_index]) - point_b: numpy.ndarray = numpy.asarray(self.points[point_index-1]) - vector: numpy.ndarray = point_a - point_b - marker_size_sum += numpy.linalg.norm(vector) - self.marker_size = marker_size_sum / len(self.points) - return self.marker_size - - def get_points_internal(self) -> list[list[float]]: - # Use the TargetBase.get_points() instead. - if self.points is None: - if self.marker_size is None: - raise RuntimeError("TargetMarker defined with neither marker_size nor points.") - half_width = self.marker_size / 2.0 - self.points = [ - [-half_width, half_width, 0.0], - [half_width, half_width, 0.0], - [half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]] - return self.points - - -class TargetBase(BaseModel, abc.ABC): - target_id: str = Field() - - @abc.abstractmethod - def get_marker_ids(self) -> list[str]: ... - - @abc.abstractmethod - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: ... - - @abc.abstractmethod - def get_points(self) -> list[list[float]]: ... 
- - -class TargetMarker(TargetBase, Marker): - def get_marker_ids(self) -> list[str]: - return [self.marker_id] - - def get_points(self) -> list[list[float]]: - return self.get_points_internal() - - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: - if marker_id != self.marker_id: - raise IndexError(f"marker_id {marker_id} is not in target {self.target_id}") - return self.get_points_internal() - - -class TargetBoard(TargetBase): - markers: list[Marker] = Field() - _marker_dict: None | dict[str, Marker] = PrivateAttr() - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self._marker_dict = None - - def get_marker_ids(self) -> list[str]: - return [marker.marker_id for marker in self.markers] - - def get_points(self) -> list[list[float]]: - points = list() - for marker in self.markers: - points += marker.get_points_internal() - return points - - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: - if self._marker_dict is None: - self._marker_dict = dict() - for marker in self.markers: - self._marker_dict[marker.marker_id] = marker - if marker_id not in self._marker_dict: - raise IndexError(f"marker_id {marker_id} is not in target {self.target_id}") - return self._marker_dict[marker_id].points diff --git a/src/common/structures/vec3.py b/src/common/structures/vec3.py deleted file mode 100644 index 4445d4d..0000000 --- a/src/common/structures/vec3.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel, Field - - -class Vec3(BaseModel): - x: float = Field() - y: float = Field() - z: float = Field() diff --git a/src/common/util/__init__.py b/src/common/util/__init__.py deleted file mode 100644 index 5d14bf2..0000000 --- a/src/common/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .io_utils import IOUtils -from .math_utils import MathUtils -from .register_corresponding_points import register_corresponding_points diff --git a/src/common/util/io_utils.py b/src/common/util/io_utils.py deleted file mode 
100644 index 6f7a1e9..0000000 --- a/src/common/util/io_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -import hjson -import json -import os -from typing import Any, Callable, Literal - - -class IOUtils: - """ - static class for IO-related utility functions. - """ - - PathType = Literal["filepath", "path"] - - @staticmethod - def exists( - path: str, - pathtype: PathType, - on_error_for_user: Callable[[str], Any], - on_error_for_dev: Callable[[str], Any], - create_path: bool = False - ) -> bool: - """ - :param path: Location of target - :param pathtype: "filepath" (target is file) or "path" (target is directory) - :param on_error_for_user: - Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. - :param on_error_for_dev: - Function to supply with a developer-friendly string (not by end-user) in the event of an error. - :param create_path: - If pathtype == "path" and this is True, try to create the path if it does not exist. Default False. - :return: True if there is a file/path at the indicated path, otherwise False. - """ - exists: bool = os.path.exists(path) - if pathtype == "filepath": - if exists and not os.path.isfile(path): - on_error_for_user( - "Filepath location exists but is not a file. " - "Most likely a directory exists at that location, " - "and it needs to be manually removed.") - on_error_for_dev( - f"Specified filepath location {path} exists but is not a file.") - return False - elif pathtype == "path": - if exists and not os.path.isdir(path): - on_error_for_user( - "Path location exists but is not a path. 
" - "Most likely a file exists at that location, " - "and it needs to be manually removed.") - on_error_for_dev( - f"Specified path location {path} exists but is not a path.") - return False - if create_path and not exists: - os.makedirs(name=path, exist_ok=True) - exists = os.path.exists(path) - return exists - - @staticmethod - def hjson_read( - filepath: str, - on_error_for_user: Callable[[str], Any], - on_error_for_dev: Callable[[str], Any] - ) -> dict | None: - """ - :param filepath: - :param on_error_for_user: - Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. - :param on_error_for_dev: - Function to supply with a developer-friendly string (not by end-user) in the event of an error. - :return: Dictionary representing the JSON data if successful, otherwise None - """ - if not IOUtils.exists( - path=filepath, - pathtype="filepath", - on_error_for_user=on_error_for_user, - on_error_for_dev=on_error_for_dev - ): - return None - json_dict: dict - try: - with open(filepath, 'r', encoding='utf-8') as input_file: - json_dict = hjson.load(input_file) - except OSError as e: - on_error_for_user("An unexpected file I/O error happened while reading a file.") - on_error_for_dev(str(e)) - return None - return json_dict - - @staticmethod - def json_read( - filepath: str, - on_error_for_user: Callable[[str], Any], - on_error_for_dev: Callable[[str], Any] - ) -> dict | None: - """ - Generally it is better to use hjson_read instead, which is less strict and supports comments. - This function currently forwards to hjson_ready anyway. 
- """ - return IOUtils.hjson_read( - filepath=filepath, - on_error_for_user=on_error_for_user, - on_error_for_dev=on_error_for_dev) - - @staticmethod - def json_write( - filepath: str, - json_dict: dict, - on_error_for_user: Callable[[str], Any], - on_error_for_dev: Callable[[str], Any], - ignore_none: bool = False, - indent: int = 4 - ) -> bool: - """ - :param filepath: - :param json_dict: - :param on_error_for_user: - Function to supply with a publicly-viewable string (message viewable by end-user) in the event of an error. - :param on_error_for_dev: - Function to supply with a developer-friendly string (not by end-user) in the event of an error. - :param ignore_none: Explicitly ignore (don't write) any key-value pairs where the value is None. Default False. - :param indent: Width (spaces) of each indentation level. Default 4. - :return: True if the file was written, otherwise False. - """ - filepath = os.path.join(filepath) - path = os.path.dirname(filepath) - if not IOUtils.exists( - path=path, - pathtype="path", - on_error_for_user=on_error_for_user, - on_error_for_dev=on_error_for_dev, - create_path=True - ): - return False - if ignore_none is True: - json_dict = IOUtils._remove_all_none_from_dict_recursive(json_dict) - try: - with open(filepath, 'w', encoding='utf-8') as output_file: - json.dump(json_dict, output_file, sort_keys=False, indent=indent) - except OSError as e: - on_error_for_user("An unexpected file I/O error happened while writing a file.") - on_error_for_dev(str(e)) - return False - return True - - @staticmethod - def _remove_all_none_from_dict_recursive( - input_dict - ) -> dict: - output_dict = dict(input_dict) - for key in output_dict: - if isinstance(output_dict[key], dict): - output_dict[key] = IOUtils._remove_all_none_from_dict_recursive(output_dict[key]) - elif output_dict[key] is None: - del output_dict[key] - return output_dict diff --git a/src/common/util/math_utils.py b/src/common/util/math_utils.py deleted file mode 100644 index 
eba1b2c..0000000 --- a/src/common/util/math_utils.py +++ /dev/null @@ -1,125 +0,0 @@ -from ..structures import \ - IntrinsicParameters, \ - Matrix4x4, \ - TargetBase -import cv2 -import numpy -from scipy.spatial.transform import Rotation -from typing import TypeVar - - -XPointKey = TypeVar("XPointKey") - - -class MathUtils: - """ - static class for reused math-related functions. - """ - - def __init__(self): - raise RuntimeError("This class is not meant to be initialized.") - - @staticmethod - def convert_detector_points_to_vectors( - points: list[list[float]], # [point_index][x/y/z] - detector_intrinsics: IntrinsicParameters, - detector_to_reference_matrix: Matrix4x4 - ) -> list[list[float]]: - """ - Given a detector's matrix transform and its intrinsic properties, - convert pixel coordinates to ray directions (with origin at the detector). - """ - distorted_points: numpy.ndarray = numpy.asarray(points) - distorted_points = numpy.reshape( - a=distorted_points, - newshape=(1, len(points), 2)) - undistorted_points: numpy.ndarray = cv2.undistortPoints( - src=distorted_points, - cameraMatrix=numpy.asarray(detector_intrinsics.get_matrix()), - distCoeffs=numpy.asarray(detector_intrinsics.get_distortion_coefficients())) - rays: list[list[float]] = list() - origin_point_detector = [0, 0, 0, 1] # origin - detector_to_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array() - for undistorted_point in undistorted_points: - target_point_image = [undistorted_point[0, 0], undistorted_point[0, 1], 1, 1] # reverse perspective - target_point_detector = MathUtils.image_to_opengl_vector(target_point_image) - ray_direction_detector = numpy.subtract(target_point_detector, origin_point_detector) - ray_direction_detector = ray_direction_detector / numpy.linalg.norm(ray_direction_detector) # normalize - ray_direction_reference = numpy.matmul(detector_to_reference, ray_direction_detector) - rays.append(list(ray_direction_reference[0:3])) - return rays - - @staticmethod - 
def convert_detector_corners_to_vectors( - corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y] - detector_intrinsics: IntrinsicParameters, - detector_to_reference_matrix: Matrix4x4 - ) -> dict[str, list[list[float]]]: # [marker_id][point_index][x/y/z] - """ - Given a detector's matrix transform and its intrinsic properties, - convert pixel coordinates to ray directions (with origin at the detector). - """ - ray_vectors_by_marker_id: dict[str, list[list[float]]] = dict() - corners: list[list[float]] - marker_id: str - for marker_id in corners_by_marker_id.keys(): - corners = corners_by_marker_id[marker_id] - rays: list[list[float]] = MathUtils.convert_detector_points_to_vectors( - points=corners, - detector_intrinsics=detector_intrinsics, - detector_to_reference_matrix=detector_to_reference_matrix) - ray_vectors_by_marker_id[marker_id] = rays - return ray_vectors_by_marker_id - - @staticmethod - def estimate_matrix_transform_to_detector( - target: TargetBase, - corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y/z] - detector_intrinsics: IntrinsicParameters - ) -> Matrix4x4: - target_points: list[list[float]] = list() # ordered points [point_index][x/y/z] - detector_points: list[list[float]] = list() # ordered points [point_index][x/y] - for marker_id in target.get_marker_ids(): - if marker_id in corners_by_marker_id: - target_points += target.get_points_for_marker_id(marker_id=marker_id) - detector_points += corners_by_marker_id[marker_id] - rotation_vector: numpy.ndarray - translation_vector: numpy.ndarray - _, rotation_vector, translation_vector = cv2.solvePnP( - objectPoints=numpy.asarray(target_points), - imagePoints=numpy.asarray(detector_points), - cameraMatrix=numpy.asarray(detector_intrinsics.get_matrix()), - distCoeffs=numpy.asarray(detector_intrinsics.get_distortion_coefficients())) - rotation_vector = rotation_vector.flatten() - translation_vector = translation_vector.flatten() - 
object_to_camera_matrix = numpy.identity(4, dtype="float32") - object_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix() - object_to_camera_matrix[0:3, 3] = translation_vector[0:3] - object_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(object_to_camera_matrix) - return Matrix4x4.from_numpy_array(object_to_detector_matrix) - - @staticmethod - def image_to_opengl_transformation_matrix( - transformation_matrix_image: numpy.ndarray - ) -> numpy.ndarray: - transformation_matrix_detector = numpy.array(transformation_matrix_image) - transformation_matrix_detector[1:3, :] = -transformation_matrix_detector[1:3, :] - return transformation_matrix_detector - # Equivalent to: - # transformation_matrix_180 = numpy.identity(4, dtype="float") - # transformation_matrix_180[1, 1] *= -1 - # transformation_matrix_180[2, 2] *= -1 - # return numpy.matmul(transformation_matrix_180, transformation_matrix_image) - - @staticmethod - def image_to_opengl_vector( - vector_image: numpy.ndarray | list[float] - ) -> numpy.ndarray: - vector_detector = numpy.array(vector_image) - vector_detector[1:3] = -vector_detector[1:3] - return vector_detector - # Equivalent to: - # transformation_matrix_180 = numpy.identity(4, dtype="float") - # transformation_matrix_180[1, 1] *= -1 - # transformation_matrix_180[2, 2] *= -1 - # return numpy.matmul(transformation_matrix_180, vector_image) diff --git a/src/common/util/register_corresponding_points.py b/src/common/util/register_corresponding_points.py deleted file mode 100644 index d26545e..0000000 --- a/src/common/util/register_corresponding_points.py +++ /dev/null @@ -1,70 +0,0 @@ -# Solution based on: Arun et al. Least square fitting of two 3D point sets (1987) -# https://stackoverflow.com/questions/66923224/rigid-registration-of-two-point-clouds-with-known-correspondence -# Use mirroring solution proposed by Oomori et al. -# Oomori et al. Point cloud matching using singular value decomposition. 
(2016) -import numpy - - -def register_corresponding_points( - point_set_from: list[list[float]], - point_set_to: list[list[float]], - collinearity_do_check: bool = True, - collinearity_zero_threshold: float = 0.0001, - use_oomori_mirror_fix: bool = True -) -> numpy.array: # 4x4 transformation matrix, indexed by [row,col] - """ - :param point_set_from: - :param point_set_to: - :param collinearity_do_check: Do a (naive) collinearity check. May be computationally expensive. - :param collinearity_zero_threshold: Threshold considered zero for cross product and norm comparisons - """ - if len(point_set_from) != len(point_set_to): - raise ValueError("Input point sets must be of identical length.") - if len(point_set_from) < 3: - raise ValueError("Input point sets must be of length 3 or higher.") - if collinearity_do_check: - for point_set in (point_set_from, point_set_to): - collinear = True # assume true until shown otherwise - p1: numpy.ndarray = numpy.asarray(point_set[0]) - vec1: numpy.ndarray - i: int = 1 - for i in range(i, len(point_set)): - p2: numpy.ndarray = numpy.asarray(point_set[1]) - vec1 = p2 - p1 - vec1_length: float = numpy.linalg.norm(vec1) - if vec1_length > collinearity_zero_threshold: - break # points are distinct, move to next phase - for i in range(i, len(point_set)): - p3: numpy.ndarray = numpy.asarray(point_set[2]) - vec2: numpy.ndarray = p3 - p1 - cross_product_norm: float = numpy.linalg.norm(numpy.cross(vec1, vec2)) - if cross_product_norm > collinearity_zero_threshold: - collinear = False - break - if collinear: - raise ValueError("Input points appear to be collinear - please check the input.") - - # for consistency, points are in rows - point_count = len(point_set_from) - sums_from = numpy.array([0, 0, 0], dtype="float32") - sums_to = numpy.array([0, 0, 0], dtype="float32") - for point_index in range(0, point_count): - sums_from += numpy.array(point_set_from[point_index]) - sums_to += numpy.array(point_set_to[point_index]) - centroid_from = 
(sums_from / point_count) - centroid_to = (sums_to / point_count) - points_from = numpy.array(point_set_from) - points_to = numpy.array(point_set_to) - centered_points_from = points_from - numpy.hstack(centroid_from) - centered_points_to = points_to - numpy.hstack(centroid_to) - covariance = numpy.matmul(centered_points_from.T, centered_points_to) - u, _, vh = numpy.linalg.svd(covariance) - s = numpy.identity(3, dtype="float32") # s will be the Oomori mirror fix - if use_oomori_mirror_fix: - s[2, 2] = numpy.linalg.det(numpy.matmul(u, vh)) - rotation = numpy.matmul(u, numpy.matmul(s, vh)).transpose() - translation = centroid_to - numpy.matmul(rotation, centroid_from) - matrix = numpy.identity(4, dtype="float32") - matrix[0:3, 0:3] = rotation - matrix[0:3, 3] = translation[0:3].reshape(3) - return matrix diff --git a/src/controller/__init__.py b/src/controller/__init__.py index 56390e1..f809d53 100644 --- a/src/controller/__init__.py +++ b/src/controller/__init__.py @@ -1,5 +1,9 @@ -from .mct_controller import MCTController -from .structures import \ - MCTComponentAddress, \ +from .connection import \ Connection, \ - ConnectionReport + DetectorConnection, \ + PoseSolverConnection +from .configuration import \ + MCTComponentConfig, \ + MCTConfiguration, \ + StartupMode +from .mct_controller import MCTController diff --git a/src/controller/structures/mct_configuration.py b/src/controller/configuration.py similarity index 73% rename from src/controller/structures/mct_configuration.py rename to src/controller/configuration.py index 3e26675..5d86d32 100644 --- a/src/controller/structures/mct_configuration.py +++ b/src/controller/configuration.py @@ -1,11 +1,14 @@ -from .startup_mode import StartupMode -from src.common.structures import \ +from src.common import \ KeyValueSimpleAny, \ Matrix4x4, \ - TargetBoard, \ - TargetMarker + Target +from enum import StrEnum from pydantic import BaseModel, Field, SerializeAsAny -from typing import Union + + +class 
StartupMode(StrEnum): + DETECTING_ONLY = "detecting_only" + DETECTING_AND_SOLVING = "detecting_and_solving" class MCTComponentConfig(BaseModel): @@ -22,10 +25,10 @@ class DetectorComponentConfig(MCTComponentConfig): class PoseSolverConfig(MCTComponentConfig): solver_parameters: list[SerializeAsAny[KeyValueSimpleAny]] | None = Field(default=None) - targets: list[Union[TargetBoard, TargetMarker]] | None = Field(default=None) + targets: list[Target] | None = Field(default=None) class MCTConfiguration(BaseModel): startup_mode: StartupMode = Field() detectors: list[DetectorComponentConfig] = Field(default_factory=list) - pose_solvers: list[PoseSolverConfig] = Field(default_factory=list) + mixers: list[PoseSolverConfig] = Field(default_factory=list) diff --git a/src/controller/structures/connection.py b/src/controller/connection.py similarity index 57% rename from src/controller/structures/connection.py rename to src/controller/connection.py index 0bf73bf..e5175e8 100644 --- a/src/controller/structures/connection.py +++ b/src/controller/connection.py @@ -1,20 +1,59 @@ -from .mct_component_address import MCTComponentAddress -from .connection_report import ConnectionReport from src.common import \ DequeueStatusMessagesResponse, \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ + ImageResolution, \ + IntrinsicParameters, \ + KeyValueSimpleAny, \ + Matrix4x4, \ + MCTDeserializable, \ + MCTRequest, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ - TimestampGetResponse -from src.common.structures import \ - MCTParsable, \ + Pose, \ + MixerFrame, \ SeverityLabel, \ - StatusMessage + StatusMessage, \ + Target, \ + TimestampGetResponse +from src.detector import \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetResponse, \ + 
IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListResponse, \ + CameraImageGetResponse, \ + CameraParametersGetResponse, \ + CameraParametersSetRequest, \ + CameraParametersSetResponse, \ + CameraResolutionGetResponse, \ + DetectorFrameGetResponse, \ + DetectorStartRequest, \ + DetectorStopRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest +from src.mixer import \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + PoseSolverAddTargetResponse, \ + PoseSolverGetPosesResponse, \ + PoseSolverSetTargetsRequest, \ + MixerStartRequest, \ + MixerStopRequest import abc import datetime from enum import StrEnum +from ipaddress import IPv4Address import json from typing import Final import uuid @@ -35,17 +74,35 @@ class Connection(abc.ABC): class State(StrEnum): # This is the normal progression cycle ending back in "Inactive" - INACTIVE: Final[str] = "Inactive" - CONNECTING: Final[str] = "Connecting" - INITIALIZING: Final[str] = "Initializing" - RUNNING: Final[str] = "Running" - RECONNECTING: Final[str] = "Reconnecting" # Only if connection gets lost - NORMAL_DEINITIALIZING: Final[str] = "Deinitializing" # normal means not in a failure state - NORMAL_DISCONNECTING: Final[str] = "Disconnecting" + INACTIVE = "Inactive" + CONNECTING = "Connecting" + INITIALIZING = "Initializing" + RUNNING = "Running" + RECONNECTING = "Reconnecting" # Only if connection gets lost + NORMAL_DEINITIALIZING = "Deinitializing" # normal means not in a failure state + NORMAL_DISCONNECTING = "Disconnecting" # States below indicate abnormal/failed states - FAILURE: Final[str] = "Failure" - FAILURE_DISCONNECTING: Final[str] = "Failure - Disconnecting" - 
FAILURE_DEINITIALIZING: Final[str] = "Failure - Deinitializing" + FAILURE = "Failure" + FAILURE_DISCONNECTING = "Failure - Disconnecting" + FAILURE_DEINITIALIZING = "Failure - Deinitializing" + + class ComponentAddress: + """ + Information used to establish a connection, + there is nothing that should change here without a user's explicit input. + """ + + def __init__( + self, + label: str, + role: str, + ip_address: IPv4Address, + port: int + ): + self.label = label + self.role = role + self.ip_address = ip_address + self.port = port class ConnectionResult: success: bool @@ -60,25 +117,25 @@ def __init__( self.error_message = error_message class DeinitializationResult(StrEnum): - IN_PROGRESS: Final[str] = "In Progress" - SUCCESS: Final[str] = "Success" - FAILURE: Final[str] = "Failure" + IN_PROGRESS = "In Progress" + SUCCESS = "Success" + FAILURE = "Failure" class InitializationResult(StrEnum): - IN_PROGRESS: Final[str] = "In Progress" - SUCCESS: Final[str] = "Success" - FAILURE: Final[str] = "Failure" + IN_PROGRESS = "In Progress" + SUCCESS = "Success" + FAILURE = "Failure" class SendRecvResult(StrEnum): - NORMAL: Final[str] = "Normal" - FAILURE: Final[str] = "Failure" + NORMAL = "Normal" + FAILURE = "Failure" class PopResponseSeriesResult: class Status(StrEnum): - QUEUED: Final[str] = "Exists" - IN_PROGRESS: Final[str] = "In Progress" - RESPONDED: Final[str] = "Responded" - UNTRACKED: Final[str] = "Untracked" # this suggests an error has occurred + QUEUED = "Exists" + IN_PROGRESS = "In Progress" + RESPONDED = "Responded" + UNTRACKED = "Untracked" # this suggests an error has occurred status: Status response_series: MCTResponseSeries | None @@ -90,8 +147,42 @@ def __init__( self.status = status self.response_series = response_series + class Report: + """ + Human-readable information that shall be shown to a user about a connection. 
+ """ + label: str + role: str + ip_address: str + port: int + status: str + + def __init__( + self, + label: str, + role: str, + ip_address: str, + port: int, + status: str + ): + self.label = label + self.role = role + self.ip_address = ip_address + self.port = port + self.status = status + + def __eq__(self, other): + if not isinstance(other, Connection.Report): + return False + return ( + self.label == other.label and + self.role == other.role and + self.ip_address == other.ip_address and + self.port == other.port and + self.status == other.status) + # treat as immutable - _component_address: MCTComponentAddress + _component_address: ComponentAddress _state: State @@ -116,7 +207,7 @@ def __init__( def __init__( self, - component_address: MCTComponentAddress + component_address: ComponentAddress ): self._component_address = component_address @@ -174,7 +265,7 @@ def enqueue_status_message( source_label=self._component_address.label, severity=severity, message=message, - timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat())) + timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat())) def get_current_state(self) -> str: return self._state @@ -182,8 +273,8 @@ def get_current_state(self) -> str: def get_label(self) -> str: return self._component_address.label - def get_report(self) -> ConnectionReport: - return ConnectionReport( + def get_report(self) -> Report: + return Connection.Report( label=self._component_address.label, role=self._component_address.role, ip_address=str(self._component_address.ip_address), @@ -248,8 +339,8 @@ def _send_recv(self) -> SendRecvResult: def _response_series_converter( response_series_dict: dict ) -> MCTResponseSeries: - series_list: list[MCTResponse] = MCTParsable.parse_dynamic_series_list( - parsable_series_dict=response_series_dict, + series_list: list[MCTResponse] = MCTDeserializable.deserialize_series_list( + series_dict=response_series_dict, supported_types=self.supported_response_types()) 
return MCTResponseSeries(series=series_list) @@ -263,7 +354,7 @@ def _response_series_converter( except ConnectionClosed as e: self._state = Connection.State.FAILURE self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection is closed for {self._component_address.label}. Cannot send. {str(e)}") return Connection.SendRecvResult.FAILURE @@ -280,7 +371,7 @@ def _response_series_converter( except ConnectionClosed as e: self._state = Connection.State.FAILURE self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection is closed for {self._component_address.label}. Cannot receive. {str(e)}") return Connection.SendRecvResult.FAILURE @@ -312,15 +403,19 @@ def start_up(self) -> None: f"Current state: {self._state}") self._state = Connection.State.CONNECTING self._attempt_count = 0 - self._next_attempt_timestamp_utc = datetime.datetime.utcnow() + self._next_attempt_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) @abc.abstractmethod - def supported_response_types(self) -> list[type[MCTResponse]]: - return [ + def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_list: list[MCTResponse] = [ DequeueStatusMessagesResponse, EmptyResponse, ErrorResponse, TimestampGetResponse] + type_dict: dict[str, type[MCTResponse]] = { + type_single.type_identifier(): type_single + for type_single in type_list} + return type_dict def _try_connect(self) -> ConnectionResult: uri: str = f"ws://{self._component_address.ip_address}:{self._component_address.port}/websocket" @@ -368,7 +463,7 @@ def _update_deinitialization_result(self) -> DeinitializationResult: request_series_id=self._deinit_request_id) if response_result.status == Connection.PopResponseSeriesResult.Status.UNTRACKED: self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"The current request ID is not recognized.") self._deinit_request_id = None return 
Connection.DeinitializationResult.FAILURE @@ -392,7 +487,7 @@ def _update_initialization_result(self) -> InitializationResult: request_series_id=self._init_request_id) if response_result.status == Connection.PopResponseSeriesResult.Status.UNTRACKED: self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"The current request ID is not recognized.") self._init_request_id = None return Connection.InitializationResult.FAILURE @@ -404,26 +499,32 @@ def _update_initialization_result(self) -> InitializationResult: return Connection.InitializationResult.IN_PROGRESS def _update_in_connecting_state(self) -> None: - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(tz=datetime.timezone.utc) if now_utc >= self._next_attempt_timestamp_utc: self._attempt_count += 1 connection_result: Connection.ConnectionResult = self._try_connect() if connection_result.success: message = f"Connection successful." - self.enqueue_status_message(severity="info", message=message) + self.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) self._state = Connection.State.INITIALIZING else: if self._attempt_count >= _ATTEMPT_COUNT_MAXIMUM: message = \ f"Failed to connect, received error: {str(connection_result.error_message)}. "\ f"Connection is being aborted after {self._attempt_count} attempts." - self.enqueue_status_message(severity="error", message=message) + self.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=message) self._state = Connection.State.FAILURE else: message: str = \ f"Failed to connect, received error: {str(connection_result.error_message)}. "\ f"Will retry in {_ATTEMPT_TIME_GAP_SECONDS} seconds." 
- self.enqueue_status_message(severity="warning", message=message) + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=message) self._next_attempt_timestamp_utc = now_utc + datetime.timedelta( seconds=_ATTEMPT_TIME_GAP_SECONDS) @@ -461,20 +562,204 @@ def _update_in_normal_disconnecting_state(self) -> None: self._state = Connection.State.INACTIVE def _update_in_reconnecting_state(self) -> None: - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(tz=datetime.timezone.utc) if now_utc >= self._next_attempt_timestamp_utc: connection_result: Connection.ConnectionResult = self._try_connect() if connection_result.success: message = f"Reconnection successful." - self.enqueue_status_message(severity="info", message=message) + self.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) self._state = Connection.State.RUNNING else: message: str = \ f"Failed to reconnect, received error: {str(connection_result.error_message)}. "\ f"Will retry in {_ATTEMPT_TIME_GAP_SECONDS} seconds." 
- self.enqueue_status_message(severity="warning", message=message) + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=message) self._next_attempt_timestamp_utc = now_utc + datetime.timedelta( seconds=_ATTEMPT_TIME_GAP_SECONDS) def _update_in_running_state(self) -> None: self._send_recv() + + +class DetectorConnection(Connection): + + configured_transform_to_reference: Matrix4x4 | None + configured_camera_parameters: list[KeyValueSimpleAny] | None + configured_marker_parameters: list[KeyValueSimpleAny] | None + + # These are variables used directly by the MCTController for storing data + request_id: uuid.UUID | None + current_resolution: ImageResolution | None + current_intrinsic_parameters: IntrinsicParameters | None + latest_frame: DetectorFrame | None + recording: list[DetectorFrame] | None + + def __init__( + self, + component_address: Connection.ComponentAddress + ): + super().__init__(component_address=component_address) + + self.configured_transform_to_reference = None + self.configured_camera_parameters = None + self.configured_marker_parameters = None + + self.request_id = None + self.current_resolution = None + self.current_intrinsic_parameters = None + self.latest_frame = None + self.recording = [] + + def create_deinitialization_request_series(self) -> MCTRequestSeries: + return MCTRequestSeries(series=[DetectorStopRequest()]) + + def create_initialization_request_series(self) -> MCTRequestSeries: + series: list[MCTRequest] = [DetectorStartRequest()] + if self.configured_camera_parameters is not None: + series.append(CameraParametersSetRequest(parameters=self.configured_camera_parameters)) + if self.configured_marker_parameters is not None: + series.append(AnnotatorParametersSetRequest(parameters=self.configured_marker_parameters)) + return MCTRequestSeries(series=series) + + def handle_deinitialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.DeinitializationResult: + response_count: int = 
len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") + elif not isinstance(response_series.series[0], (EmptyResponse, CameraParametersSetResponse)): + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"The deinitialization response was not of the expected type EmptyResponse.") + return Connection.DeinitializationResult.SUCCESS + + def handle_initialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.InitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"Expected exactly one response to initialization requests. Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"The initialization response was not of the expected type EmptyResponse.") + return Connection.InitializationResult.SUCCESS + + def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_dict: dict[str, type[MCTResponse]] = super().supported_response_types() + type_list: list[MCTResponse] = [ + AnnotatorParametersGetResponse, + CameraImageGetResponse, + CameraParametersGetResponse, + CameraParametersSetResponse, + CameraResolutionGetResponse, + DetectorFrameGetResponse, + IntrinsicCalibrationCalculateResponse, + IntrinsicCalibrationImageAddResponse, + IntrinsicCalibrationImageGetResponse, + IntrinsicCalibrationImageMetadataListResponse, + IntrinsicCalibrationResolutionListResponse, + IntrinsicCalibrationResultGetResponse, + IntrinsicCalibrationResultGetActiveResponse, + IntrinsicCalibrationResultMetadataListResponse] + type_dict.update({ + type_single.type_identifier(): type_single + for type_single in type_list}) + return type_dict + + +class 
PoseSolverConnection(Connection): + + # These are variables used directly by the MCTController for storing data + + configured_solver_parameters: list[KeyValueSimpleAny] | None + configured_targets: list[Target] | None + + request_id: uuid.UUID | None + detector_poses: list[Pose] + target_poses: list[Pose] + detector_timestamps: dict[str, datetime.datetime] # access by detector_label + poses_timestamp: datetime.datetime + recording: list[MixerFrame] | None + + def __init__( + self, + component_address: Connection.ComponentAddress + ): + super().__init__(component_address=component_address) + + self.configured_solver_parameters = None + self.configured_targets = None + + self.request_id = None + self.detector_poses = list() + self.target_poses = list() + self.detector_timestamps = dict() + self.poses_timestamp = datetime.datetime.min + self.recording = [] + + def create_deinitialization_request_series(self) -> MCTRequestSeries: + return MCTRequestSeries(series=[MixerStopRequest()]) + + def create_initialization_request_series(self) -> MCTRequestSeries: + series: list[MCTRequest] = [MixerStartRequest()] + if self.configured_targets is not None: + series.append(PoseSolverSetTargetsRequest(targets=self.configured_targets)) + return MCTRequestSeries(series=series) + + def handle_deinitialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.DeinitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"Expected exactly one response to deinitialization requests. 
Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"The deinitialization response was not of the expected type EmptyResponse.") + return Connection.DeinitializationResult.SUCCESS + + def handle_initialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.InitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"Expected exactly one response to initialization requests. Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=f"The initialization response was not of the expected type EmptyResponse.") + return Connection.InitializationResult.SUCCESS + + def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_dict: dict[str, type[MCTResponse]] = super().supported_response_types() + type_list: list[MCTResponse] = [ + ExtrinsicCalibrationCalculateResponse, + ExtrinsicCalibrationImageAddResponse, + ExtrinsicCalibrationImageGetResponse, + ExtrinsicCalibrationImageMetadataListResponse, + ExtrinsicCalibrationResultGetResponse, + ExtrinsicCalibrationResultGetActiveResponse, + ExtrinsicCalibrationResultMetadataListResponse, + PoseSolverAddTargetResponse, + PoseSolverGetPosesResponse] + type_dict.update({ + type_single.type_identifier(): type_single + for type_single in type_list}) + return type_dict diff --git a/src/controller/exceptions/__init__.py b/src/controller/exceptions/__init__.py deleted file mode 100644 index d9141c4..0000000 --- a/src/controller/exceptions/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .response_series_not_expected import ResponseSeriesNotExpected diff --git a/src/controller/exceptions/response_series_not_expected.py 
b/src/controller/exceptions/response_series_not_expected.py deleted file mode 100644 index d2f3abc..0000000 --- a/src/controller/exceptions/response_series_not_expected.py +++ /dev/null @@ -1,5 +0,0 @@ -from src.common.exceptions import MCTError - - -class ResponseSeriesNotExpected(MCTError): - pass diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index b3782a9..bcd66ed 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -1,46 +1,45 @@ -from .exceptions import ResponseSeriesNotExpected -from .structures import \ - ConnectionReport, \ - Connection, \ - DetectorConnection, \ - MCTComponentAddress, \ +from .configuration import \ MCTComponentConfig, \ MCTConfiguration, \ - PoseSolverConnection, \ StartupMode +from .connection import \ + Connection, \ + DetectorConnection, \ + PoseSolverConnection from src.common import \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ + IntrinsicParameters, \ MCTComponent, \ + MCTError, \ MCTRequest, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ + MixerFrame, \ + SeverityLabel, \ + StatusMessage, \ StatusMessageSource, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.common.structures import \ - ComponentRoleLabel, \ - COMPONENT_ROLE_LABEL_DETECTOR, \ - COMPONENT_ROLE_LABEL_POSE_SOLVER, \ - DetectorFrame, \ - IntrinsicParameters, \ - PoseSolverFrame -from src.detector.api import \ - CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse, \ +from src.detector import \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ CameraResolutionGetRequest, \ CameraResolutionGetResponse, \ + Detector, \ DetectorFrameGetRequest, \ DetectorFrameGetResponse -from src.pose_solver.api import \ +from src.mixer import \ + Mixer, \ + MixerUpdateIntrinsicParametersRequest, \ PoseSolverAddDetectorFrameRequest, \ PoseSolverGetPosesRequest, \ 
PoseSolverGetPosesResponse, \ - PoseSolverSetExtrinsicRequest, \ - PoseSolverSetIntrinsicRequest + PoseSolverSetExtrinsicRequest import datetime from enum import IntEnum, StrEnum import hjson @@ -50,31 +49,39 @@ import numpy import os from pydantic import ValidationError -from typing import Callable, Final, get_args, TypeVar +from typing import Callable, Final, TypeVar import uuid logger = logging.getLogger(__name__) ConnectionType = TypeVar('ConnectionType', bound=Connection) +_ROLE_LABEL: Final[str] = "controller" +_SUPPORTED_ROLES: Final[list[str]] = [ + Detector.get_role_label(), + Mixer.get_role_label()] _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: Final[int] = 5 +class ResponseSeriesNotExpected(MCTError): + pass + + class MCTController(MCTComponent): class Status(StrEnum): - STOPPED: Final[int] = "Idle" - STARTING: Final[int] = "Starting" - RUNNING: Final[int] = "Running" - STOPPING: Final[int] = "Stopping" + STOPPED = "Idle" + STARTING = "Starting" + RUNNING = "Running" + STOPPING = "Stopping" class StartupState(IntEnum): - INITIAL: Final[int] = 0 - CONNECTING: Final[int] = 1 - TIME_SYNC_START: Final[int] = 2 - TIME_SYNC_STOP: Final[int] = 3 - GET_INTRINSICS: Final[int] = 4 - SET_INTRINSICS: Final[int] = 5 + INITIAL = 0 + CONNECTING = 1 + TIME_SYNC_START = 2 + TIME_SYNC_STOP = 3 + GET_INTRINSICS = 4 + SET_INTRINSICS = 5 _status_message_source: StatusMessageSource _status: Status @@ -113,13 +120,13 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: IPv4Address(connection.ip_address) except ValueError: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection {connection.label} has invalid IP address {connection.ip_address}. " "It will be skipped.") return False if connection.port < 0 or connection.port > 65535: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection {connection.label} has invalid port {connection.port}. 
" "It will be skipped.") return False @@ -128,10 +135,10 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: for detector in configuration.detectors: if not is_valid_ip_address(detector): continue - component_address: MCTComponentAddress = MCTComponentAddress( + component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=detector.label, role="detector", - ip_address=detector.ip_address, + ip_address=IPv4Address(detector.ip_address), port=detector.port) detector_connection: DetectorConnection = self.add_connection(component_address=component_address) if detector.fixed_transform_to_reference is not None: @@ -140,13 +147,13 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: detector_connection.configured_camera_parameters = detector.camera_parameters if detector.marker_parameters is not None: detector_connection.configured_marker_parameters = detector.marker_parameters - for pose_solver in configuration.pose_solvers: + for pose_solver in configuration.mixers: if not is_valid_ip_address(pose_solver): continue - component_address: MCTComponentAddress = MCTComponentAddress( + component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=pose_solver.label, - role="pose_solver", - ip_address=pose_solver.ip_address, + role="mixer", + ip_address=IPv4Address(pose_solver.ip_address), port=pose_solver.port) pose_solver_connection: PoseSolverConnection = self.add_connection(component_address=component_address) if pose_solver.solver_parameters is not None: @@ -156,16 +163,16 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: def add_connection( self, - component_address: MCTComponentAddress + component_address: Connection.ComponentAddress ) -> DetectorConnection | PoseSolverConnection: label = component_address.label if label in self._connections: raise RuntimeError(f"Connection associated with {label} already exists.") - if component_address.role == COMPONENT_ROLE_LABEL_DETECTOR: + if 
component_address.role == Detector.get_role_label(): return_value: DetectorConnection = DetectorConnection(component_address=component_address) self._connections[label] = return_value return return_value - elif component_address.role == COMPONENT_ROLE_LABEL_POSE_SOLVER: + elif component_address.role == Mixer.get_role_label(): return_value: PoseSolverConnection = PoseSolverConnection(component_address=component_address) self._connections[label] = return_value return return_value @@ -175,7 +182,7 @@ def add_connection( def _advance_startup_state(self) -> None: if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.CONNECTING: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="CONNECTING complete") component_labels: list[str] = self.get_component_labels(active=True) request_series: MCTRequestSeries = MCTRequestSeries(series=[TimeSyncStartRequest()]) @@ -192,11 +199,11 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.TIME_SYNC_START if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.TIME_SYNC_START: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="TIME_SYNC complete") component_labels: list[str] = self.get_component_labels(active=True) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - TimestampGetRequest(requester_timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat())]) + TimestampGetRequest(requester_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat())]) for component_label in component_labels: self._pending_request_ids.append( self.request_series_push( @@ -213,27 +220,27 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.TIME_SYNC_STOP if len(self._pending_request_ids) <= 0 and self._startup_state == 
MCTController.StartupState.TIME_SYNC_STOP: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="STARTING_CAPTURE complete") detector_labels: list[str] = self.get_active_detector_labels() for detector_label in detector_labels: request_series: MCTRequestSeries = MCTRequestSeries( series=[ CameraResolutionGetRequest(), - CalibrationResultGetActiveRequest()]) + IntrinsicCalibrationResultGetActiveRequest()]) self._pending_request_ids.append(self.request_series_push( connection_label=detector_label, request_series=request_series)) self._startup_state = MCTController.StartupState.GET_INTRINSICS if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.GET_INTRINSICS: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="GET_INTRINSICS complete") if self._startup_mode == StartupMode.DETECTING_ONLY: self._startup_state = MCTController.StartupState.INITIAL self._status = MCTController.Status.RUNNING # We're done else: - pose_solver_labels: list[str] = self.get_active_pose_solver_labels() + pose_solver_labels: list[str] = self.get_active_mixer_labels() for pose_solver_label in pose_solver_labels: requests: list[MCTRequest] = list() for detector_label in self.get_active_detector_labels(): @@ -242,11 +249,11 @@ def _advance_startup_state(self) -> None: connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") continue if detector_connection.current_intrinsic_parameters is not None: - requests.append(PoseSolverSetIntrinsicRequest( + requests.append(MixerUpdateIntrinsicParametersRequest( detector_label=detector_label, intrinsic_parameters=detector_connection.current_intrinsic_parameters)) if detector_connection.configured_transform_to_reference 
is not None: @@ -260,7 +267,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.SET_INTRINSICS if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.SET_INTRINSICS: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="SET_INTRINSICS complete") self._startup_state = MCTController.StartupState.INITIAL self._status = MCTController.Status.RUNNING @@ -272,13 +279,13 @@ def get_active_detector_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=COMPONENT_ROLE_LABEL_DETECTOR, active=True) + return self.get_component_labels(role=Detector.get_role_label(), active=True) - def get_active_pose_solver_labels(self) -> list[str]: + def get_active_mixer_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=COMPONENT_ROLE_LABEL_POSE_SOLVER, active=True) + return self.get_component_labels(role=Mixer.get_role_label(), active=True) def get_component_labels( self, @@ -290,9 +297,8 @@ def get_component_labels( None provided to `role` or `active` is treated as a wildcard (i.e. not filtered on that criteria). 
""" if role is not None: - valid_roles: list[str] = list(get_args(ComponentRoleLabel)) - if role not in valid_roles: - raise ValueError(f"role must be among the valid values {str(valid_roles)}") + if role not in _SUPPORTED_ROLES: + raise ValueError(f"role must be among the valid values for ComponentRoleLabel") return_value: list[str] = list() for connection_label, connection in self._connections.items(): if role is not None and connection.get_role() != role: @@ -302,8 +308,8 @@ def get_component_labels( return_value.append(connection_label) return return_value - def get_connection_reports(self) -> list[ConnectionReport]: - return_value: list[ConnectionReport] = list() + def get_connection_reports(self) -> list[Connection.Report]: + return_value: list[Connection.Report] = list() for connection in self._connections.values(): return_value.append(connection.get_report()) return return_value @@ -351,7 +357,7 @@ def get_live_detector_frame( def get_live_pose_solver_frame( self, pose_solver_label: str - ) -> PoseSolverFrame | None: + ) -> MixerFrame | None: """ returns None if the pose solver does not exist, or has not been started, or if it has not yet gotten frames. 
""" @@ -360,11 +366,15 @@ def get_live_pose_solver_frame( connection_type=PoseSolverConnection) if pose_solver_connection is None: return None - return PoseSolverFrame( + return MixerFrame( detector_poses=pose_solver_connection.detector_poses, target_poses=pose_solver_connection.target_poses, timestamp_utc_iso8601=pose_solver_connection.poses_timestamp.isoformat()) + @staticmethod + def get_role_label(): + return _ROLE_LABEL + def get_status(self) -> Status: return self._status @@ -373,12 +383,12 @@ def handle_error_response( response: ErrorResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received error: {response.message}") def handle_response_calibration_result_get_active( self, - response: CalibrationResultGetActiveResponse, + response: IntrinsicCalibrationResultGetActiveResponse, detector_label: str ) -> None: detector_connection: DetectorConnection = self._get_connection( @@ -386,17 +396,17 @@ def handle_response_calibration_result_get_active( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return if response.intrinsic_calibration is None: if detector_connection.current_resolution is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"No calibration was found for detector {detector_label}, and failed to get resolution.") return self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"No calibration was found for detector {detector_label}. 
" f"Zero parameters for active resolution {detector_connection.current_resolution} will be used.") detector_connection.current_intrinsic_parameters = IntrinsicParameters.generate_zero_parameters( @@ -415,7 +425,7 @@ def handle_response_camera_resolution_get( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return detector_connection.current_resolution = response.resolution @@ -430,12 +440,12 @@ def handle_response_detector_frame_get( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return frame: DetectorFrame = response.frame adjusted_timestamp_utc: datetime.datetime = \ - frame.timestamp_utc() - datetime.timedelta(seconds=detector_connection.controller_offset_seconds) + frame.timestamp_utc - datetime.timedelta(seconds=detector_connection.controller_offset_seconds) frame.timestamp_utc_iso8601 = adjusted_timestamp_utc.isoformat() detector_connection.latest_frame = frame @@ -449,13 +459,13 @@ def handle_response_get_poses( connection_type=PoseSolverConnection) if pose_solver_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find PoseSolverConnection with label {pose_solver_label}.") return pose_solver_connection.detector_poses = response.detector_poses pose_solver_connection.target_poses = response.target_poses pose_solver_connection.poses_timestamp = ( - datetime.datetime.utcnow() - # TODO: This should come from the pose solver + datetime.datetime.now(tz=datetime.timezone.utc) - # TODO: This should come from the pose solver 
datetime.timedelta(seconds=pose_solver_connection.controller_offset_seconds)) def handle_response_timestamp_get( @@ -466,7 +476,7 @@ def handle_response_timestamp_get( connection: Connection = self._get_connection( connection_label=component_label, connection_type=Connection) - utc_now: datetime.datetime = datetime.datetime.utcnow() + utc_now: datetime.datetime = datetime.datetime.now(tz=datetime.timezone.utc) requester_timestamp: datetime.datetime requester_timestamp = datetime.datetime.fromisoformat(response.requester_timestamp_utc_iso8601) round_trip_seconds: float = (utc_now - requester_timestamp).total_seconds() @@ -476,11 +486,11 @@ def handle_response_timestamp_get( network_plus_offset_seconds: float = (responder_timestamp - requester_timestamp).total_seconds() connection.network_plus_offset_samples_seconds.append(network_plus_offset_seconds) if self._time_sync_sample_count >= _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: - connection.network_latency_seconds = numpy.median(connection.network_latency_samples_seconds) + connection.network_latency_seconds = float(numpy.median(connection.network_latency_samples_seconds)) connection.controller_offset_samples_seconds = [ network_plus_offset_sample_seconds - (connection.network_latency_seconds / 2.0) for network_plus_offset_sample_seconds in connection.network_plus_offset_samples_seconds] - connection.controller_offset_seconds = numpy.median(connection.controller_offset_samples_seconds) + connection.controller_offset_seconds = float(numpy.median(connection.controller_offset_samples_seconds)) print(f"Calculated offset to {connection.get_label()}: {connection.controller_offset_seconds}") def handle_response_unknown( @@ -488,7 +498,7 @@ def handle_response_unknown( response: MCTResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received unexpected response: {str(type(response))}") def handle_response_series( @@ -504,13 +514,13 @@ def handle_response_series( 
task_text = f" during {task_description}" if response_count < expected_response_count: self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Received a response series{task_text}, " f"but it contained fewer responses ({response_count}) " f"than expected ({expected_response_count}).") elif response_count > expected_response_count: self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Received a response series{task_text}, " f"but it contained more responses ({response_count}) " f"than expected ({expected_response_count}).") @@ -518,7 +528,7 @@ def handle_response_series( success: bool = True response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationResultGetActiveResponse): + if isinstance(response, IntrinsicCalibrationResultGetActiveResponse): self.handle_response_calibration_result_get_active( response=response, detector_label=response_series.responder) @@ -568,7 +578,7 @@ def recording_start( self._recording_save_path = save_path else: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Recording save path not defined") def recording_stop(self): @@ -578,13 +588,13 @@ def recording_stop(self): connection_type=Connection) report = connection.get_report() # Do not record if specified - if report.role == COMPONENT_ROLE_LABEL_DETECTOR and not self._recording_detector: + if report.role == Detector.get_role_label() and not self._recording_detector: continue - if report.role == COMPONENT_ROLE_LABEL_POSE_SOLVER and not self._recording_pose_solver: + if report.role == Mixer.get_role_label() and not self._recording_pose_solver: continue - if self._recording_save_path is not None: - frames_dict = [frame.dict() for frame in connection.recording] + if isinstance(connection, DetectorConnection) and self._recording_save_path is not None: + frames_dict = [frame.model_dump() for 
frame in connection.recording] frames_json = json.dumps(frames_dict) with open(os.path.join(self._recording_save_path, report.role+"_log.json"), 'w') as f: f.write(frames_json) @@ -685,16 +695,16 @@ def start_up( raise RuntimeError("Cannot start up if controller isn't first stopped.") for connection in self._connections.values(): if mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == COMPONENT_ROLE_LABEL_POSE_SOLVER: + connection.get_role() == Mixer.get_role_label(): continue connection.start_up() self._startup_state = MCTController.StartupState.CONNECTING self._status = MCTController.Status.STARTING - self.recording_start(save_path="/home/adminpi5", - record_pose_solver=True, - record_detector=True) + # self.recording_start(save_path="/home/adminpi5", + # record_pose_solver=True, + # record_detector=True) def shut_down(self) -> None: if self._status != MCTController.Status.RUNNING: @@ -707,8 +717,8 @@ def shut_down(self) -> None: self.recording_stop() - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return super().supported_request_types() + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return super().supported_request_methods() # Right now this function doesn't update on its own - must be called externally and regularly def update( @@ -717,13 +727,20 @@ def update( connections = list(self._connections.values()) for connection in connections: connection.update() + status_messages: list[StatusMessage] = connection.dequeue_status_messages() + for status_message in status_messages: + self._status_message_source.enqueue_status_message( + severity=status_message.severity, + message=status_message.message, + source_label=status_message.source_label, + timestamp_utc_iso8601=status_message.timestamp_utc_iso8601) if self._status == MCTController.Status.STARTING and \ self._startup_state == MCTController.StartupState.CONNECTING: all_connected: bool = True for 
connection in connections: if self._startup_mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == COMPONENT_ROLE_LABEL_POSE_SOLVER: + connection.get_role() == Mixer.get_role_label(): continue if not connection.is_start_up_finished(): all_connected = False @@ -746,7 +763,7 @@ def update( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") continue if detector_connection.request_id is not None: @@ -756,13 +773,13 @@ def update( detector_connection.request_id = self.request_series_push( connection_label=detector_label, request_series=MCTRequestSeries(series=[DetectorFrameGetRequest()])) - for pose_solver_label in self.get_active_pose_solver_labels(): + for pose_solver_label in self.get_active_mixer_labels(): pose_solver_connection: PoseSolverConnection = self._get_connection( connection_label=pose_solver_label, connection_type=PoseSolverConnection) if pose_solver_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find PoseSolverConnection with label {pose_solver_label}.") continue if pose_solver_connection.request_id is not None: @@ -779,7 +796,7 @@ def update( detector_label=detector_label) if current_detector_frame is None: continue - current_detector_frame_timestamp: datetime.datetime = current_detector_frame.timestamp_utc() + current_detector_frame_timestamp: datetime.datetime = current_detector_frame.timestamp_utc current_is_new: bool = False if detector_label in pose_solver_connection.detector_timestamps: old_detector_frame_timestamp = \ @@ -791,9 +808,9 @@ def update( if current_is_new: pose_solver_connection.detector_timestamps[detector_label] = \ current_detector_frame_timestamp - adjusted_detector_frame: DetectorFrame = current_detector_frame.copy() + 
adjusted_detector_frame: DetectorFrame = current_detector_frame.model_copy() adjusted_timestamp_utc: datetime.datetime = \ - current_detector_frame.timestamp_utc() + \ + current_detector_frame.timestamp_utc + \ datetime.timedelta(seconds=pose_solver_connection.controller_offset_seconds) adjusted_detector_frame.timestamp_utc_iso8601 = adjusted_timestamp_utc.isoformat() marker_request: PoseSolverAddDetectorFrameRequest = PoseSolverAddDetectorFrameRequest( diff --git a/src/controller/structures/__init__.py b/src/controller/structures/__init__.py deleted file mode 100644 index 3f7735b..0000000 --- a/src/controller/structures/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .connection import Connection -from .connection_report import ConnectionReport -from .detector_connection import DetectorConnection -from .mct_component_address import MCTComponentAddress -from .mct_configuration import \ - MCTComponentConfig, \ - MCTConfiguration -from .pose_solver_connection import PoseSolverConnection -from .startup_mode import StartupMode diff --git a/src/controller/structures/connection_report.py b/src/controller/structures/connection_report.py deleted file mode 100644 index b4573af..0000000 --- a/src/controller/structures/connection_report.py +++ /dev/null @@ -1,22 +0,0 @@ -from pydantic import BaseModel, Field - - -class ConnectionReport(BaseModel): - """ - Human-readable information that shall be shown to a user about a connection. 
- """ - label: str = Field() - role: str = Field() - ip_address: str = Field() - port: int = Field() - status: str = Field() - - def __eq__(self, other): - if not isinstance(other, ConnectionReport): - return False - return ( - self.label == other.label and - self.role == other.role and - self.ip_address == other.ip_address and - self.port == other.port and - self.status == other.status) diff --git a/src/controller/structures/detector_connection.py b/src/controller/structures/detector_connection.py deleted file mode 100644 index c15fad1..0000000 --- a/src/controller/structures/detector_connection.py +++ /dev/null @@ -1,122 +0,0 @@ -from .mct_component_address import MCTComponentAddress -from .connection import Connection -from src.common.api import \ - EmptyResponse, \ - MCTRequest, \ - MCTRequestSeries, \ - MCTResponse, \ - MCTResponseSeries -from src.common.structures import \ - DetectorFrame, \ - ImageResolution, \ - IntrinsicParameters, \ - KeyValueSimpleAny, \ - Matrix4x4 -from src.detector.api import \ - CalibrationCalculateResponse, \ - CalibrationImageAddResponse, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListResponse, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListResponse, \ - CameraImageGetResponse, \ - CameraParametersGetResponse, \ - CameraParametersSetRequest, \ - CameraParametersSetResponse, \ - CameraResolutionGetResponse, \ - DetectorFrameGetResponse, \ - DetectorStartRequest, \ - DetectorStopRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest -import uuid - - -class DetectorConnection(Connection): - - configured_transform_to_reference: Matrix4x4 | None - configured_camera_parameters: list[KeyValueSimpleAny] | None - configured_marker_parameters: list[KeyValueSimpleAny] | None - - # These are variables used directly by the MCTController for storing data - request_id: uuid.UUID | None - current_resolution: 
ImageResolution | None - current_intrinsic_parameters: IntrinsicParameters | None - latest_frame: DetectorFrame | None - recording: list[DetectorFrame] | None - - def __init__( - self, - component_address: MCTComponentAddress - ): - super().__init__(component_address=component_address) - - self.configured_transform_to_reference = None - self.configured_camera_parameters = None - self.configured_marker_parameters = None - - self.request_id = None - self.current_resolution = None - self.current_intrinsic_parameters = None - self.latest_frame = None - self.recording = [] - - def create_deinitialization_request_series(self) -> MCTRequestSeries: - return MCTRequestSeries(series=[DetectorStopRequest()]) - - def create_initialization_request_series(self) -> MCTRequestSeries: - series: list[MCTRequest] = [DetectorStartRequest()] - if self.configured_camera_parameters is not None: - series.append(CameraParametersSetRequest(parameters=self.configured_camera_parameters)) - if self.configured_marker_parameters is not None: - series.append(MarkerParametersSetRequest(parameters=self.configured_marker_parameters)) - return MCTRequestSeries(series=series) - - def handle_deinitialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.DeinitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to deinitialization requests. 
Got {response_count}.") - elif not isinstance(response_series.series[0], (EmptyResponse, CameraParametersSetResponse)): - self.enqueue_status_message( - severity="warning", - message=f"The deinitialization response was not of the expected type EmptyResponse.") - return Connection.DeinitializationResult.SUCCESS - - def handle_initialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.InitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to initialization requests. Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The initialization response was not of the expected type EmptyResponse.") - return Connection.InitializationResult.SUCCESS - - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ - CalibrationCalculateResponse, - CalibrationImageAddResponse, - CalibrationImageGetResponse, - CalibrationImageMetadataListResponse, - CalibrationResolutionListResponse, - CalibrationResultGetResponse, - CalibrationResultGetActiveResponse, - CalibrationResultMetadataListResponse, - CameraImageGetResponse, - CameraParametersGetResponse, - CameraParametersSetResponse, - CameraResolutionGetResponse, - DetectorFrameGetResponse, - MarkerParametersGetResponse] diff --git a/src/controller/structures/mct_component_address.py b/src/controller/structures/mct_component_address.py deleted file mode 100644 index 955d7d1..0000000 --- a/src/controller/structures/mct_component_address.py +++ /dev/null @@ -1,14 +0,0 @@ -from src.common.structures.component_role_label import ComponentRoleLabel -from ipaddress import IPv4Address -from pydantic import BaseModel, Field - - -class MCTComponentAddress(BaseModel): - """ - Information used to establish a connection, - 
there is nothing that should change here without a user's explicit input. - """ - label: str = Field() - role: ComponentRoleLabel = Field() - ip_address: IPv4Address = Field() - port: int = Field() diff --git a/src/controller/structures/pose_solver_connection.py b/src/controller/structures/pose_solver_connection.py deleted file mode 100644 index 0dd6ea5..0000000 --- a/src/controller/structures/pose_solver_connection.py +++ /dev/null @@ -1,94 +0,0 @@ -from src.common.structures.pose_solver_frame import PoseSolverFrame -from .mct_component_address import MCTComponentAddress -from .connection import Connection -from src.common.api import \ - EmptyResponse, \ - MCTRequest, \ - MCTRequestSeries, \ - MCTResponse, \ - MCTResponseSeries -from src.common.structures import \ - KeyValueSimpleAny, \ - Pose, \ - TargetBase -from src.pose_solver.api import \ - PoseSolverGetPosesResponse, \ - PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, \ - PoseSolverStopRequest -import datetime -import uuid - - -class PoseSolverConnection(Connection): - - # These are variables used directly by the MCTController for storing data - - configured_solver_parameters: list[KeyValueSimpleAny] | None - configured_targets: list[TargetBase] | None - - request_id: uuid.UUID | None - detector_poses: list[Pose] - target_poses: list[Pose] - detector_timestamps: dict[str, datetime.datetime] # access by detector_label - poses_timestamp: datetime.datetime - recording: list[PoseSolverFrame] | None - - def __init__( - self, - component_address: MCTComponentAddress - ): - super().__init__(component_address=component_address) - - self.configured_solver_parameters = None - self.configured_targets = None - - self.request_id = None - self.detector_poses = list() - self.target_poses = list() - self.detector_timestamps = dict() - self.poses_timestamp = datetime.datetime.min - self.recording = [] - - def create_deinitialization_request_series(self) -> MCTRequestSeries: - return 
MCTRequestSeries(series=[PoseSolverStopRequest()]) - - def create_initialization_request_series(self) -> MCTRequestSeries: - series: list[MCTRequest] = [PoseSolverStartRequest()] - if self.configured_targets is not None: - series.append(PoseSolverSetTargetsRequest(targets=self.configured_targets)) - return MCTRequestSeries(series=series) - - def handle_deinitialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.DeinitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The deinitialization response was not of the expected type EmptyResponse.") - return Connection.DeinitializationResult.SUCCESS - - def handle_initialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.InitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to initialization requests. 
Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The initialization response was not of the expected type EmptyResponse.") - return Connection.InitializationResult.SUCCESS - - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ - PoseSolverGetPosesResponse] diff --git a/src/controller/structures/startup_mode.py b/src/controller/structures/startup_mode.py deleted file mode 100644 index 9c085fe..0000000 --- a/src/controller/structures/startup_mode.py +++ /dev/null @@ -1,7 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class StartupMode(StrEnum): - DETECTING_ONLY: Final[str] = "detecting_only" - DETECTING_AND_SOLVING: Final[str] = "detecting_and_solving" diff --git a/src/detector/__init__.py b/src/detector/__init__.py index a14ed65..8839013 100644 --- a/src/detector/__init__.py +++ b/src/detector/__init__.py @@ -1,6 +1,37 @@ -from .calibrator import Calibrator -from .detector import Detector -from .interfaces import \ - AbstractCamera, \ - AbstractMarker -from .structures import DetectorConfiguration +from .api import \ + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + 
IntrinsicCalibrationResultMetadataListResponse, \ + IntrinsicCalibrationResultMetadataUpdateRequest, \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + CameraParametersGetRequest, \ + CameraParametersGetResponse, \ + CameraParametersSetRequest, \ + CameraParametersSetResponse, \ + CameraResolutionGetRequest, \ + CameraResolutionGetResponse, \ + DetectorFrameGetRequest, \ + DetectorFrameGetResponse, \ + DetectorStartRequest, \ + DetectorStopRequest, \ + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest +from .detector import \ + Detector diff --git a/src/detector/api.py b/src/detector/api.py index f56cea4..d32d55a 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -1,449 +1,342 @@ from src.common import \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ - CaptureFormat, \ DetectorFrame, \ - IntrinsicCalibration, \ + ImageFormat, \ ImageResolution, \ + IntrinsicCalibration, \ + IntrinsicCalibrator, \ KeyValueMetaAny, \ - KeyValueSimpleAny -from .structures import \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationResultMetadata, \ - CalibrationResultState + KeyValueSimpleAny, \ + MCTRequest, \ + MCTResponse from pydantic import Field, SerializeAsAny -from typing import Final, Literal -class CalibrationCalculateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" - +class AnnotatorParametersGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationCalculateRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_annotator_parameters_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - image_resolution: ImageResolution = Field() - - -class CalibrationCalculateResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" +class 
AnnotatorParametersGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationCalculateResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_annotator_parameters_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - result_identifier: str = Field() - intrinsic_calibration: IntrinsicCalibration = Field() + parsable_type: str = Field(default=type_identifier()) + parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() -class CalibrationDeleteStagedRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_delete_staged" +class AnnotatorParametersSetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationDeleteStagedRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_annotator_parameters_set" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() -class CalibrationImageAddRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" +class CameraImageGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageAddRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_image_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + format: ImageFormat = Field() + requested_resolution: ImageResolution | None = Field(default=None) -class CalibrationImageAddResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" +class CameraImageGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageAddResponse._TYPE_IDENTIFIER - 
- # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_camera_image_get" - image_identifier: str = Field() + parsable_type: str = Field(default=type_identifier()) + format: ImageFormat = Field() + image_base64: str = Field() + original_resolution: ImageResolution = Field() -class CalibrationImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" +class CameraParametersGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_parameters_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - image_identifier: str = Field() - - -class CalibrationImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" +class CameraParametersGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageGetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_parameters_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - image_base64: str = Field() + parsable_type: str = Field(default=type_identifier()) + parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() -class CalibrationImageMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" +class CameraParametersSetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageMetadataListRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_parameters_set" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - 
- image_resolution: ImageResolution = Field() + parsable_type: str = Field(default=type_identifier()) + parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() -class CalibrationImageMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" +class CameraParametersSetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageMetadataListResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_parameters_set" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) + parsable_type: str = Field(default=type_identifier()) + resolution: ImageResolution = Field() # Sometimes parameter changes may result in changes of resolution -class CalibrationImageMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_update" +class CameraResolutionGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_resolution_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - image_identifier: str = Field() - image_state: CalibrationImageState = Field() - image_label: str | None = Field(default=None) + parsable_type: str = Field(default=type_identifier()) -class CalibrationResolutionListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" - +class CameraResolutionGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResolutionListRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_camera_resolution_get" - # noinspection PyTypeHints - parsable_type: 
Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + resolution: ImageResolution = Field() -class CalibrationResolutionListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" +class DetectorFrameGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResolutionListResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_frame_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - resolutions: list[ImageResolution] = Field() + parsable_type: str = Field(default=type_identifier()) + include_detected: bool = Field(default=True) + include_rejected: bool = Field(default=True) -class CalibrationResultGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" +class DetectorFrameGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_frame_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - result_identifier: str = Field() + parsable_type: str = Field(default=type_identifier()) + frame: DetectorFrame = Field() -class CalibrationResultGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" +class DetectorStartRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - intrinsic_calibration: IntrinsicCalibration = Field() + def type_identifier() -> str: + return "detector_start" + parsable_type: str = Field(default=type_identifier()) -class CalibrationResultGetActiveRequest(MCTRequest): - 
_TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" +class DetectorStopRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultGetActiveRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_stop" + parsable_type: str = Field(default=type_identifier()) -class CalibrationResultGetActiveResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" +class IntrinsicCalibrationCalculateRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultGetActiveResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_intrinsic_calibration_calculate" - intrinsic_calibration: IntrinsicCalibration | None = Field() + parsable_type: str = Field(default=type_identifier()) + image_resolution: ImageResolution = Field() -class CalibrationResultMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" +class IntrinsicCalibrationCalculateResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultMetadataListRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_intrinsic_calibration_calculate" - image_resolution: ImageResolution = Field() + parsable_type: str = Field(default=type_identifier()) + result_identifier: str = Field() + intrinsic_calibration: IntrinsicCalibration = Field() -class CalibrationResultMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" +class IntrinsicCalibrationDeleteStagedRequest(MCTRequest): 
@staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultMetadataListResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_intrinsic_calibration_delete_staged" - metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) + parsable_type: str = Field(default=type_identifier()) -class CalibrationResultMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_update" - +class IntrinsicCalibrationImageAddRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_add" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - result_identifier: str = Field() - result_state: CalibrationResultState = Field() - result_label: str | None = Field(default=None) - - -class CameraImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" +class IntrinsicCalibrationImageAddResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CameraImageGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_add" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - format: CaptureFormat = Field() - requested_resolution: ImageResolution | None = Field(default=None) + image_identifier: str = Field() -class CameraImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" - +class IntrinsicCalibrationImageGetRequest(MCTRequest): @staticmethod - def 
parsable_type_identifier() -> str: - return CameraImageGetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - format: CaptureFormat = Field() - image_base64: str = Field() + image_identifier: str = Field() -class CameraParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" - +class IntrinsicCalibrationImageGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CameraParametersGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + image_base64: str = Field() -class CameraParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" +class IntrinsicCalibrationImageMetadataListRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CameraParametersGetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_metadata_list" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() + parsable_type: str = Field(default=type_identifier()) + image_resolution: ImageResolution = Field() -class CameraParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" +class IntrinsicCalibrationImageMetadataListResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CameraParametersSetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return 
"detector_intrinsic_calibration_image_metadata_list" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() + parsable_type: str = Field(default=type_identifier()) + metadata_list: list[IntrinsicCalibrator.ImageMetadata] = Field(default_factory=list) -class CameraParametersSetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" +class IntrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CameraParametersSetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_image_metadata_update" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - resolution: ImageResolution = Field() # Sometimes parameter changes may result in changes of resolution + parsable_type: str = Field(default=type_identifier()) + image_identifier: str = Field() + image_state: IntrinsicCalibrator.ImageState = Field() + image_label: str | None = Field(default=None) -class CameraResolutionGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" +class IntrinsicCalibrationResolutionListRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return CameraResolutionGetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return "detector_intrinsic_calibration_detector_resolutions_list" + parsable_type: str = Field(default=type_identifier()) -class CameraResolutionGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" +class IntrinsicCalibrationResolutionListResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return CameraResolutionGetResponse._TYPE_IDENTIFIER + 
def type_identifier() -> str: + return "detector_intrinsic_calibration_detector_resolutions_list" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - resolution: ImageResolution = Field() + parsable_type: str = Field(default=type_identifier()) + resolutions: list[ImageResolution] = Field() -class DetectorFrameGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" +class IntrinsicCalibrationResultGetRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return DetectorFrameGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - include_detected: bool = Field(default=True) - include_rejected: bool = Field(default=True) + result_identifier: str = Field() -class DetectorFrameGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" - +class IntrinsicCalibrationResultGetResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return DetectorFrameGetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - frame: DetectorFrame = Field() + intrinsic_calibration: IntrinsicCalibration = Field() -class DetectorStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_start" - +class IntrinsicCalibrationResultGetActiveRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return DetectorStartRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + def type_identifier() -> str: + return 
"detector_intrinsic_calibration_result_active_get" + parsable_type: str = Field(default=type_identifier()) -class DetectorStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_stop" +class IntrinsicCalibrationResultGetActiveResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return DetectorStopRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_active_get" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + intrinsic_calibration: IntrinsicCalibration = Field() -class MarkerParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" +class IntrinsicCalibrationResultMetadataListRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersGetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_metadata_list" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) + image_resolution: ImageResolution = Field() -class MarkerParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" +class IntrinsicCalibrationResultMetadataListResponse(MCTResponse): @staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersGetResponse._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_metadata_list" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() + parsable_type: str = Field(default=type_identifier()) + metadata_list: list[IntrinsicCalibrator.ResultMetadata] = Field(default_factory=list) -class 
MarkerParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_set" +class IntrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): @staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersSetRequest._TYPE_IDENTIFIER + def type_identifier() -> str: + return "detector_intrinsic_calibration_result_metadata_update" - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) - parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() + result_identifier: str = Field() + result_state: IntrinsicCalibrator.ResultState = Field() + result_label: str | None = Field(default=None) diff --git a/src/detector/detector_app.py b/src/detector/app.py similarity index 61% rename from src/detector/detector_app.py rename to src/detector/app.py index 5e3ff72..4e68b8f 100644 --- a/src/detector/detector_app.py +++ b/src/detector/app.py @@ -1,73 +1,80 @@ +from .api import \ + IntrinsicCalibrationResultGetActiveResponse, \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + CameraParametersGetResponse, \ + CameraResolutionGetResponse, \ + DetectorFrameGetRequest, \ + DetectorFrameGetResponse, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest +from .detector import \ + Detector from src.common import \ - client_identifier_from_connection, \ + Annotator, \ + Camera, \ EmptyResponse, \ ErrorResponse, \ + ImageFormat, \ + IntrinsicCalibrator, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.detector import \ - Detector, \ - DetectorConfiguration -from src.detector.api import \ - CalibrationResultGetActiveResponse, \ - CameraImageGetRequest, \ - CameraImageGetResponse, \ - CameraParametersGetResponse, \ - CameraResolutionGetResponse, \ - DetectorFrameGetRequest, \ - DetectorFrameGetResponse, \ - MarkerParametersGetResponse, \ - 
MarkerParametersSetRequest -from src.detector.interfaces import \ - AbstractCamera, \ - AbstractMarker import asyncio import base64 -from fastapi import FastAPI, Request +from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.websockets import WebSocket import hjson import logging import os +from typing import Final logger = logging.getLogger(__name__) +CONFIGURATION_FILEPATH_ENV_VAR: Final[str] = "MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH" + def create_app() -> FastAPI: - detector_configuration_filepath: str = \ - os.path.join(os.path.dirname(__file__), "..", "..", "data", "detector_config.json") - detector_configuration: DetectorConfiguration + detector_configuration_filepath: str = os.path.join( + os.path.dirname(__file__), "..", "..", "data", "configuration", "detector", "opencv_aruco.json") + detector_configuration_filepath = os.getenv(CONFIGURATION_FILEPATH_ENV_VAR, detector_configuration_filepath) + detector_configuration: Detector.Configuration with open(detector_configuration_filepath, 'r') as infile: detector_configuration_file_contents: str = infile.read() detector_configuration_dict = hjson.loads(detector_configuration_file_contents) - detector_configuration = DetectorConfiguration(**detector_configuration_dict) + detector_configuration = Detector.Configuration(**detector_configuration_dict) # Eventually it would be preferable to put the initialization logic/mapping below into an abstract factory, # and allow end-users to register custom classes that are not necessarily shipped within this library. 
- camera_type: type[AbstractCamera] - if detector_configuration.camera_configuration.driver == "opencv_capture_device": - from src.detector.implementations.camera_opencv_capture_device import OpenCVCaptureDeviceCamera + camera_type: type[Camera] + if detector_configuration.camera.implementation == "opencv_capture_device": + from src.implementations.camera_opencv_capture_device import OpenCVCaptureDeviceCamera camera_type = OpenCVCaptureDeviceCamera - elif detector_configuration.camera_configuration.driver == "picamera2": - from src.detector.implementations.camera_picamera2 import Picamera2Camera + elif detector_configuration.camera.implementation == "picamera2": + from src.implementations.camera_picamera2 import Picamera2Camera camera_type = Picamera2Camera else: raise RuntimeError(f"Unsupported camera driver {detector_configuration.camera_configuration.driver}.") - marker_type: type[AbstractMarker] - if detector_configuration.marker_configuration.method == "aruco_opencv": - from src.detector.implementations.marker_aruco_opencv import ArucoOpenCVMarker - marker_type = ArucoOpenCVMarker + marker_type: type[Annotator] + if detector_configuration.annotator.implementation == "aruco_opencv": + from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator + marker_type = ArucoOpenCVAnnotator else: - raise RuntimeError(f"Unsupported marker method {detector_configuration.marker_configuration.method}.") + raise RuntimeError(f"Unsupported marker method {detector_configuration.annotator_configuration.method}.") + + from src.implementations.intrinsic_charuco_opencv import CharucoOpenCVIntrinsicCalibrator + intrinsic_calibrator_type: type[IntrinsicCalibrator] = CharucoOpenCVIntrinsicCalibrator detector = Detector( detector_configuration=detector_configuration, camera_type=camera_type, - marker_type=marker_type) + annotator_type=marker_type, + intrinsic_calibrator_type=intrinsic_calibrator_type) detector_app = FastAPI() # CORS Middleware @@ -79,19 +86,24 @@ def 
create_app() -> FastAPI: allow_methods=["*"], allow_headers=["*"]) + @detector_app.get("/annotator/get_parameters") + async def annotator_get_parameters() -> AnnotatorParametersGetResponse | ErrorResponse: + return detector.annotator_parameters_get() + + @detector_app.post("/annotator/set_parameters") + async def annotator_set_parameters( + request: AnnotatorParametersSetRequest + ) -> EmptyResponse | ErrorResponse: + return detector.annotator_parameters_set( + request=request) + @detector_app.head("/detector/start") - async def detector_start( - http_request: Request - ) -> None: - client_identifier: str = client_identifier_from_connection(connection=http_request) - detector.detector_start(client_identifier=client_identifier) + async def detector_start() -> None: + detector.detector_start() @detector_app.head("/detector/stop") - async def detector_stop( - http_request: Request - ) -> None: - client_identifier: str = client_identifier_from_connection(connection=http_request) - detector.detector_stop(client_identifier=client_identifier) + async def detector_stop() -> None: + detector.detector_stop() @detector_app.post("/detector/start_time_sync") async def start_time_sync( @@ -121,13 +133,13 @@ async def get_timestamp( return detector.timestamp_get() @detector_app.get("/calibration/get_result_active") - async def calibration_get_result_active() -> CalibrationResultGetActiveResponse: + async def calibration_get_result_active() -> IntrinsicCalibrationResultGetActiveResponse: return detector.calibration_result_get_active() @detector_app.get("/camera/get_image") async def camera_get_image() -> CameraImageGetResponse: result: CameraImageGetResponse = detector.camera_image_get( - request=CameraImageGetRequest(format=".png")) + request=CameraImageGetRequest(format=ImageFormat.FORMAT_PNG)) image_bytes = base64.b64decode(result.image_base64) with open("test.png", "wb") as image_file: image_file.write(image_bytes) @@ -142,17 +154,6 @@ async def camera_get_parameters() -> 
CameraParametersGetResponse: async def camera_get_resolution() -> CameraResolutionGetResponse: return detector.camera_resolution_get() - @detector_app.get("/marker/get_parameters") - async def marker_get_parameters() -> MarkerParametersGetResponse | ErrorResponse: - return detector.marker_parameters_get() - - @detector_app.post("/marker/set_parameters") - async def marker_set_parameters( - request: MarkerParametersSetRequest - ) -> EmptyResponse | ErrorResponse: - return detector.marker_parameters_set( - request=request) - @detector_app.websocket("/websocket") async def websocket_handler(websocket: WebSocket) -> None: await detector.websocket_handler(websocket=websocket) diff --git a/src/detector/calibrator.py b/src/detector/calibrator.py deleted file mode 100644 index bc661bc..0000000 --- a/src/detector/calibrator.py +++ /dev/null @@ -1,624 +0,0 @@ -from .exceptions import \ - MCTDetectorRuntimeError -from .structures import \ - CalibratorConfiguration, \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationMap, \ - CalibrationMapValue, \ - CalibrationResultMetadata, \ - CalibrationResultState -from .util import assign_key_value_list_to_aruco_detection_parameters -from src.common import \ - ImageCoding, \ - StatusMessageSource -from src.common.structures import \ - CharucoBoardSpecification, \ - ImageResolution, \ - IntrinsicCalibration, \ - IntrinsicCalibrationFrameResult, \ - IntrinsicParameters, \ - KeyValueSimpleAny, \ - Vec3 -from src.common.util import \ - IOUtils -import cv2 -import cv2.aruco -import datetime -import json -from json import JSONDecodeError -import logging -import numpy -import os -from pydantic import ValidationError -from typing import Final -import uuid - - -logger = logging.getLogger(__name__) - - -class Calibrator: - - _configuration: CalibratorConfiguration - _calibration_map: dict[ImageResolution, CalibrationMapValue] - _status_message_source: StatusMessageSource - - CALIBRATION_MAP_FILENAME: Final[str] = 
"calibration_map.json" - - IMAGE_FORMAT: Final[str] = ".png" # work in lossless image format - RESULT_FORMAT: Final[str] = ".json" - - def __init__( - self, - configuration: CalibratorConfiguration, - status_message_source: StatusMessageSource - ): - self._configuration = configuration - self._status_message_source = status_message_source - if not self._exists_on_filesystem(path=self._configuration.data_path, pathtype="path", create_path=True): - self._status_message_source.enqueue_status_message( - severity="critical", - message="Data path does not exist and could not be created.") - detailed_message: str = f"{self._configuration.data_path} does not exist and could not be created." - logger.critical(detailed_message) - raise RuntimeError(detailed_message) - if not self.load(): - message: str = "The calibration map could not be loaded or created. "\ - "In order to avoid data loss, the software will now abort. " \ - "Please manually correct or remove the file in the filesystem." - logger.critical(message) - self._status_message_source.enqueue_status_message(severity="critical", message=message) - raise RuntimeError(message) - - def add_image( - self, - image_base64: str - ) -> str: # id of image - image_data: numpy.ndarray = ImageCoding.base64_to_image(input_base64=image_base64, color_mode="color") - map_key: ImageResolution = ImageResolution(x_px=image_data.shape[1], y_px=image_data.shape[0]) - # Before making any changes to the calibration map, make sure folders exist, - # and that this file does not somehow already exist (highly unlikely) - key_path: str = self._path_for_map_key(map_key=map_key) - if not self._exists_on_filesystem(path=key_path, pathtype="path", create_path=True): - raise MCTDetectorRuntimeError(message=f"Failed to create storage location for input image.") - image_identifier: str = str(uuid.uuid4()) - image_filepath = self._image_filepath( - map_key=map_key, - image_identifier=image_identifier) - if os.path.exists(image_filepath): - raise 
MCTDetectorRuntimeError( - message=f"Image {image_identifier} appears to already exist. This is never expected to occur. " - f"Please try again, and if this error continues to occur then please report a bug.") - if map_key not in self._calibration_map: - self._calibration_map[map_key] = CalibrationMapValue() - self._calibration_map[map_key].image_metadata_list.append( - CalibrationImageMetadata(identifier=image_identifier)) - # noinspection PyTypeChecker - image_bytes = ImageCoding.image_to_bytes(image_data=image_data, image_format=Calibrator.IMAGE_FORMAT) - with (open(image_filepath, 'wb') as in_file): - in_file.write(image_bytes) - self.save() - return image_identifier - - def calculate( - self, - image_resolution: ImageResolution, - marker_parameters: list[KeyValueSimpleAny] - ) -> tuple[str, IntrinsicCalibration]: - - calibration_key: ImageResolution = image_resolution - if calibration_key not in self._calibration_map: - raise MCTDetectorRuntimeError( - message=f"No images for given resolution {str(image_resolution)} found.") - - aruco_detector_parameters: ... = cv2.aruco.DetectorParameters_create() - mismatched_keys: list[str] = assign_key_value_list_to_aruco_detection_parameters( - detection_parameters=aruco_detector_parameters, - key_value_list=marker_parameters) - if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( - message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - - # TODO: ChArUco board to come from somewhere (user? 
currently assumed to be 10x8 DICT4x4_100) - charuco_spec = CharucoBoardSpecification() - # noinspection PyUnresolvedReferences - charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() - - calibration_value: CalibrationMapValue = self._calibration_map[calibration_key] - all_charuco_corners = list() - all_charuco_ids = list() - image_identifiers: list[str] = list() - for image_metadata in calibration_value.image_metadata_list: - if image_metadata.state != CalibrationImageState.SELECT: - continue - image_filepath: str = self._image_filepath( - map_key=calibration_key, - image_identifier=image_metadata.identifier) - if not self._exists_on_filesystem(path=image_filepath, pathtype="filepath"): - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Image {image_metadata.identifier} was not found. " - f"It will be omitted from the calibration.") - continue - image_rgb = cv2.imread(image_filepath) - image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) - (marker_corners, marker_ids, _) = cv2.aruco.detectMarkers( - image=image_greyscale, - dictionary=charuco_spec.aruco_dictionary(), - parameters=aruco_detector_parameters) - if len(marker_corners) <= 0: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Image {image_metadata.identifier} did not appear to contain any identifiable markers. " - f"It will be omitted from the calibration.") - continue - image_identifiers.append(image_metadata.identifier) - # Note: - # Marker corners are the corners of the markers, whereas - # ChArUco corners are the corners of the chessboard. - # ChArUco calibration function works with the corners of the chessboard. 
- _, frame_charuco_corners, frame_charuco_ids = cv2.aruco.interpolateCornersCharuco( - markerCorners=marker_corners, - markerIds=marker_ids, - image=image_greyscale, - board=charuco_board, - ) - # Algorithm requires a minimum of 4 markers - if len(frame_charuco_corners) >= 4: - all_charuco_corners.append(frame_charuco_corners) - all_charuco_ids.append(frame_charuco_ids) - - if len(all_charuco_corners) <= 0: - raise MCTDetectorRuntimeError(message="The input images did not contain visible markers.") - - # outputs to be stored in these containers - calibration_result = cv2.aruco.calibrateCameraCharucoExtended( - charucoCorners=all_charuco_corners, - charucoIds=all_charuco_ids, - board=charuco_board, - imageSize=numpy.array(charuco_spec.size_mm(), dtype="int32"), # Exception if float - cameraMatrix=numpy.identity(3, dtype='f'), - distCoeffs=numpy.zeros(5, dtype='f')) - - charuco_overall_reprojection_error = calibration_result[0] - charuco_camera_matrix = calibration_result[1] - charuco_distortion_coefficients = calibration_result[2] - charuco_rotation_vectors = calibration_result[3] - charuco_translation_vectors = calibration_result[4] - charuco_intrinsic_stdevs = calibration_result[5] - charuco_extrinsic_stdevs = calibration_result[6] - charuco_reprojection_errors = calibration_result[7] - - # TODO: Assertion on size of distortion coefficients being 5? - # Note: OpenCV documentation specifies the order of distortion coefficients - # https://docs.opencv.org/4.x/d9/d6a/group__aruco.html#ga366993d29fdddd995fba8c2e6ca811ea - # So far I have not seen calibration return a number of coefficients other than 5. - # Note too that there is an unchecked expectation that radial distortion be monotonic. 
- - intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration( - timestamp_utc=str(datetime.datetime.utcnow()), - image_resolution=image_resolution, - reprojection_error=charuco_overall_reprojection_error, - calibrated_values=IntrinsicParameters( - focal_length_x_px=charuco_camera_matrix[0, 0], - focal_length_y_px=charuco_camera_matrix[1, 1], - optical_center_x_px=charuco_camera_matrix[0, 2], - optical_center_y_px=charuco_camera_matrix[1, 2], - radial_distortion_coefficients=[ - charuco_distortion_coefficients[0, 0], - charuco_distortion_coefficients[1, 0], - charuco_distortion_coefficients[4, 0]], - tangential_distortion_coefficients=[ - charuco_distortion_coefficients[2, 0], - charuco_distortion_coefficients[3, 0]]), - calibrated_stdevs=[value[0] for value in charuco_intrinsic_stdevs], - marker_parameters=marker_parameters, - frame_results=[ - IntrinsicCalibrationFrameResult( - image_identifier=image_identifiers[i], - translation=Vec3( - x=charuco_translation_vectors[i][0, 0], - y=charuco_translation_vectors[i][1, 0], - z=charuco_translation_vectors[i][2, 0]), - rotation=Vec3( - x=charuco_rotation_vectors[i][0, 0], - y=charuco_rotation_vectors[i][1, 0], - z=charuco_rotation_vectors[i][2, 0]), - translation_stdev=Vec3( - x=charuco_extrinsic_stdevs[i*6 + 3, 0], - y=charuco_extrinsic_stdevs[i*6 + 4, 0], - z=charuco_extrinsic_stdevs[i*6 + 5, 0]), - rotation_stdev=Vec3( - x=charuco_extrinsic_stdevs[i*6 + 0, 0], - y=charuco_extrinsic_stdevs[i*6 + 1, 0], - z=charuco_extrinsic_stdevs[i*6 + 2, 0]), - reprojection_error=charuco_reprojection_errors[i, 0]) - for i in range(0, len(charuco_reprojection_errors))]) - - result_identifier: str = str(uuid.uuid4()) - result_filepath = self._result_filepath( - map_key=calibration_key, - result_identifier=result_identifier) - IOUtils.json_write( - filepath=result_filepath, - json_dict=intrinsic_calibration.dict(), - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", - 
message=msg), - on_error_for_dev=logger.error, - ignore_none=True) - result_metadata: CalibrationResultMetadata = CalibrationResultMetadata( - identifier=result_identifier, - image_identifiers=image_identifiers) - if len(self._calibration_map[calibration_key].result_metadata_list) == 0: - result_metadata.state = CalibrationResultState.ACTIVE # No active result yet, so make this one active - self._calibration_map[calibration_key].result_metadata_list.append(result_metadata) - self.save() - return result_identifier, intrinsic_calibration - - def _delete_if_exists(self, filepath: str): - try: - os.remove(filepath) - except FileNotFoundError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity="warning", # It *is* an internal error, but it has few consequences for user... so warning - message=f"Failed to remove a file from the calibrator because it does not exist. " - f"See its internal log for details.") - except OSError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity="warning", # It *is* an internal error, but it has few consequences for user... so warning - message=f"Failed to remove a file from the calibrator due to an unexpected reason. 
" - f"See its internal log for details.") - - def delete_staged(self) -> None: - for calibration_key in self._calibration_map.keys(): - calibration_value: CalibrationMapValue = self._calibration_map[calibration_key] - image_indices_to_delete: list = list() - for image_index, image in enumerate(calibration_value.image_metadata_list): - if image.state == CalibrationImageState.DELETE: - self._delete_if_exists(self._image_filepath( - map_key=calibration_key, - image_identifier=image.identifier)) - image_indices_to_delete.append(image_index) - for i in reversed(image_indices_to_delete): - del calibration_value.image_metadata_list[i] - result_indices_to_delete: list = list() - for result_index, result in enumerate(calibration_value.result_metadata_list): - if result.state == CalibrationResultState.DELETE: - self._delete_if_exists(self._result_filepath( - map_key=calibration_key, - result_identifier=result.identifier)) - result_indices_to_delete.append(result_index) - for i in reversed(result_indices_to_delete): - del calibration_value.result_metadata_list[i] - self.save() - - def _exists_on_filesystem( - self, - path: str, - pathtype: IOUtils.PathType, - create_path: bool = False - ) -> bool: - return IOUtils.exists( - path=path, - pathtype=pathtype, - create_path=create_path, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", - message=msg), - on_error_for_dev=logger.error) - - # noinspection DuplicatedCode - def get_image( - self, - image_identifier: str - ) -> str: # image in base64 - found_count: int = 0 - matching_image_resolution: ImageResolution | None = None - for image_resolution in self._calibration_map: - for image in self._calibration_map[image_resolution].image_metadata_list: - if image.identifier == image_identifier: - found_count += 1 - matching_image_resolution = image_resolution - break - if found_count < 1: - raise MCTDetectorRuntimeError( - message=f"Image identifier {image_identifier} is not 
associated with any image.") - elif found_count > 1: - raise MCTDetectorRuntimeError( - message=f"Image identifier {image_identifier} is associated with multiple images.") - - image_filepath = self._image_filepath( - map_key=matching_image_resolution, - image_identifier=image_identifier) - if not os.path.exists(image_filepath): - raise MCTDetectorRuntimeError( - message=f"File does not exist for image {image_identifier} " - f"and given resolution {str(matching_image_resolution)}.") - image_bytes: bytes - try: - with (open(image_filepath, 'rb') as in_file): - image_bytes = in_file.read() - except OSError: - raise MCTDetectorRuntimeError( - message=f"Failed to open image {image_identifier} for " - f"given resolution {str(matching_image_resolution)}.") - image_base64 = ImageCoding.bytes_to_base64(image_bytes=image_bytes) - return image_base64 - - # noinspection DuplicatedCode - def get_result( - self, - result_identifier: str - ) -> IntrinsicCalibration: - found_count: int = 0 - matching_image_resolution: ImageResolution | None = None - for image_resolution in self._calibration_map: - for result in self._calibration_map[image_resolution].result_metadata_list: - if result.identifier == result_identifier: - found_count += 1 - matching_image_resolution = image_resolution - break - if found_count < 1: - raise MCTDetectorRuntimeError( - message=f"Image identifier {result_identifier} is not associated with any result.") - elif found_count > 1: - raise MCTDetectorRuntimeError( - message=f"Image identifier {result_identifier} is associated with multiple results.") - - return self._get_result_calibration_from_file( - image_resolution=matching_image_resolution, - result_identifier=result_identifier) - - def get_result_active( - self, - image_resolution: ImageResolution - ) -> IntrinsicCalibration | None: - active_count: int = 0 - matched_metadata: CalibrationResultMetadata | None = None - if image_resolution in self._calibration_map: - result_count: int = 
len(self._calibration_map[image_resolution].result_metadata_list) - if result_count > 0: - matched_metadata = self._calibration_map[image_resolution].result_metadata_list[0] - if matched_metadata.state == CalibrationResultState.ACTIVE: - active_count = 1 - for result_index in range(1, result_count): - result_metadata = self._calibration_map[image_resolution].result_metadata_list[result_index] - if matched_metadata.state == CalibrationResultState.DELETE: - matched_metadata = result_metadata - continue # basically we ignore any data staged for DELETE - elif matched_metadata.state == CalibrationResultState.RETAIN: - if result_metadata.state == CalibrationResultState.ACTIVE: - active_count += 1 - matched_metadata = result_metadata - continue # ACTIVE shall of course take priority - elif result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): - matched_metadata = result_metadata - else: # matched_result_metadata.state == CalibrationResultState.ACTIVE: - if result_metadata.state == CalibrationResultState.ACTIVE: - # BOTH metadata are marked ACTIVE. This is not expected to occur. Indicates a problem. - active_count += 1 - if result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): - matched_metadata = result_metadata - if matched_metadata is None or \ - matched_metadata.state == CalibrationResultState.DELETE: # no result that is not marked DELETE - return None - - if active_count < 1: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"No result metadata is active for resolution {str(image_resolution)}." - "Returning latest result.") - elif active_count > 1: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Multiple result metadata are active for resolution {str(image_resolution)}. " - "Returning latest active result. 
" - "To recover from this ambiguous state, it is strong recommended to explicitly set " - "one of the results as \"active\", which will reset others to \"retain\".") - - return self._get_result_calibration_from_file( - image_resolution=image_resolution, - result_identifier=matched_metadata.identifier) - - def _get_result_calibration_from_file( - self, - image_resolution: ImageResolution, - result_identifier: str - ) -> IntrinsicCalibration: - result_filepath = self._result_filepath( - map_key=image_resolution, - result_identifier=result_identifier) - if not os.path.exists(result_filepath): - raise MCTDetectorRuntimeError( - message=f"File does not exist for result {result_identifier} " - f"and given resolution {str(image_resolution)}.") - result_json_raw: str - try: - with (open(result_filepath, 'r') as in_file): - result_json_raw = in_file.read() - except OSError: - raise MCTDetectorRuntimeError( - message=f"Failed to open result {result_identifier} for " - f"given resolution {str(image_resolution)}.") - result_json_dict: dict - try: - result_json_dict = dict(json.loads(result_json_raw)) - except JSONDecodeError: - raise MCTDetectorRuntimeError( - message=f"Failed to parse result {result_identifier} for " - f"given resolution {str(image_resolution)}.") - intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration(**result_json_dict) - return intrinsic_calibration - - def _image_filepath( - self, - map_key: ImageResolution, - image_identifier: str - ) -> str: - key_path: str = self._path_for_map_key(map_key=map_key) - return os.path.join( - key_path, - image_identifier + Calibrator.IMAGE_FORMAT) - - def list_resolutions(self) -> list[ImageResolution]: - resolutions: list[ImageResolution] = list(self._calibration_map.keys()) - return resolutions - - # noinspection DuplicatedCode - def list_image_metadata( - self, - image_resolution: ImageResolution - ) -> list[CalibrationImageMetadata]: - image_metadata_list: list[CalibrationImageMetadata] = list() - map_key: 
ImageResolution = image_resolution - if map_key in self._calibration_map: - image_metadata_list = self._calibration_map[map_key].image_metadata_list - return image_metadata_list - - # noinspection DuplicatedCode - def list_result_metadata( - self, - image_resolution: ImageResolution - ) -> list[CalibrationResultMetadata]: - result_metadata_list: list[CalibrationResultMetadata] = list() - map_key: ImageResolution = image_resolution - if map_key in self._calibration_map: - result_metadata_list = self._calibration_map[map_key].result_metadata_list - return result_metadata_list - - def load(self) -> bool: - """ - :return: True if loaded or if it can be created without overwriting existing data. False otherwise. - """ - calibration_map_filepath: str = self._map_filepath() - if not os.path.exists(calibration_map_filepath): - self._calibration_map = dict() - return True - elif not os.path.isfile(calibration_map_filepath): - logger.critical(f"Calibration map file location {calibration_map_filepath} exists but is not a file.") - self._status_message_source.enqueue_status_message( - severity="critical", - message="Filepath location for calibration map exists but is not a file. 
" - "Most likely a directory exists at that location, " - "and it needs to be manually removed.") - return False - json_dict: dict = IOUtils.hjson_read( - filepath=calibration_map_filepath, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", - message=msg), - on_error_for_dev=logger.error) - if not json_dict: - logger.error(f"Failed to load calibration map from file {calibration_map_filepath}.") - self._status_message_source.enqueue_status_message( - severity="error", - message="Failed to load calibration map from file.") - return False - calibration_map: CalibrationMap - try: - calibration_map = CalibrationMap(**json_dict) - except ValidationError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity="error", - message="Failed to parse calibration map from file.") - return False - self._calibration_map = calibration_map.as_dict() - return True - - def _map_filepath(self) -> str: - return os.path.join(self._configuration.data_path, Calibrator.CALIBRATION_MAP_FILENAME) - - def _path_for_map_key( - self, - map_key: ImageResolution - ) -> str: - return os.path.join(self._configuration.data_path, str(map_key)) - - def _result_filepath( - self, - map_key: ImageResolution, - result_identifier: str - ) -> str: - key_path: str = self._path_for_map_key(map_key=map_key) - return os.path.join( - key_path, - result_identifier + Calibrator.RESULT_FORMAT) - - def save(self) -> None: - IOUtils.json_write( - filepath=self._map_filepath(), - json_dict=CalibrationMap.from_dict(self._calibration_map).dict(), - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", - message=msg), - on_error_for_dev=logger.error) - - # noinspection DuplicatedCode - def update_image_metadata( - self, - image_identifier: str, - image_state: CalibrationImageState, - image_label: str | None - ) -> None: - found_count: int = 0 - for map_key in self._calibration_map: - for 
image in self._calibration_map[map_key].image_metadata_list: - if image.identifier == image_identifier: - image.state = image_state - if image_label is not None: - image.label = image_label - found_count += 1 - break - if found_count < 1: - raise MCTDetectorRuntimeError( - message=f"Image identifier {image_identifier} is not associated with any image.") - elif found_count > 1: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Image identifier {image_identifier} is associated with multiple images.") - self.save() - - # noinspection DuplicatedCode - def update_result_metadata( - self, - result_identifier: str, - result_state: CalibrationResultState, - result_label: str | None = None - ) -> None: - found_count: int = 0 - matching_map_keys: set[ImageResolution] = set() # Normally this shall be of size exactly 1 - for map_key in self._calibration_map: - for result in self._calibration_map[map_key].result_metadata_list: - if result.identifier == result_identifier: - result.state = result_state - if result_label is not None: - result.label = result_label - found_count += 1 - matching_map_keys.add(map_key) - break - - # Some cleanup as applicable - if result_state == CalibrationResultState.ACTIVE: - for map_key in matching_map_keys: - # If size greater than 1, something is wrong... but nonetheless - # we'll ensure there is only one active result per resolution - for result in self._calibration_map[map_key].result_metadata_list: - if result.identifier != result_identifier and result.state == CalibrationResultState.ACTIVE: - result.state = CalibrationResultState.RETAIN - - if found_count < 1: - raise MCTDetectorRuntimeError( - message=f"Result identifier {result_identifier} is not associated with any result.") - elif found_count > 1: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Result identifier {result_identifier} is associated with multiple results. 
" - "This suggests that the calibration map is in an inconsistent state. " - "It may be prudent to either manually correct it, or recreate it.") - - self.save() diff --git a/src/detector/detector.py b/src/detector/detector.py index c1c9967..9fd4db0 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -1,23 +1,26 @@ from .api import \ - CalibrationCalculateRequest, \ - CalibrationCalculateResponse, \ - CalibrationDeleteStagedRequest, \ - CalibrationImageAddRequest, \ - CalibrationImageAddResponse, \ - CalibrationImageGetRequest, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListRequest, \ - CalibrationImageMetadataListResponse, \ - CalibrationImageMetadataUpdateRequest, \ - CalibrationResolutionListRequest, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetRequest, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListRequest, \ - CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest, \ + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest, \ + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + IntrinsicCalibrationResultMetadataListResponse, 
\ + IntrinsicCalibrationResultMetadataUpdateRequest, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetRequest, \ @@ -29,234 +32,326 @@ DetectorFrameGetRequest, \ DetectorFrameGetResponse, \ DetectorStartRequest, \ - DetectorStopRequest, \ - MarkerParametersGetRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest -from .calibrator import Calibrator -from .exceptions import \ - MCTDetectorRuntimeError -from .interfaces import \ - AbstractMarker, \ - AbstractCamera -from .structures import \ - CalibrationImageMetadata, \ - CalibrationResultMetadata, \ - CameraStatus, \ - DetectorConfiguration, \ - MarkerStatus + DetectorStopRequest from src.common import \ + Annotator, \ + Camera, \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ - get_kwarg, \ - MCTComponent, \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ - DetectorFrame, \ + ImageFormat, \ ImageResolution, \ IntrinsicCalibration, \ + IntrinsicCalibrator, \ KeyValueMetaAbstract, \ - KeyValueMetaAny, \ - key_value_meta_to_simple, \ - KeyValueSimpleAny + MCTCalibrationError, \ + MCTCameraRuntimeError, \ + MCTComponent, \ + MCTAnnotatorRuntimeError, \ + MCTRequest, \ + MCTResponse, \ + SeverityLabel import logging -from typing import Callable +from typing import Callable, Final +from pydantic import BaseModel, Field logger = logging.getLogger(__name__) +_ROLE_LABEL: Final[str] = "detector" + + +class _ConfigurationSection(BaseModel): + implementation: str = Field() + configuration: dict = Field() + + +# noinspection DuplicatedCode class Detector(MCTComponent): - _detector_configuration: DetectorConfiguration + class Configuration(BaseModel): + """ + Top-level schema for Detector initialization data + """ + detector_label: str = Field() + intrinsic_calibrator: _ConfigurationSection = Field() + camera: _ConfigurationSection = Field() + annotator: _ConfigurationSection = Field() - _calibrator: Calibrator - _camera: AbstractCamera - _marker: AbstractMarker 
+ _configuration: Configuration + + _calibrator: IntrinsicCalibrator + _camera: Camera + _annotator: Annotator _frame_count: int def __init__( self, - detector_configuration: DetectorConfiguration, - camera_type: type[AbstractCamera], - marker_type: type[AbstractMarker] + detector_configuration: Configuration, + camera_type: type[Camera], + annotator_type: type[Annotator], + intrinsic_calibrator_type: type[IntrinsicCalibrator] ): super().__init__( status_source_label="detector", send_status_messages_to_logger=True) - self._detector_configuration = detector_configuration - self._calibrator = Calibrator( - configuration=detector_configuration.calibrator_configuration, - status_message_source=self.get_status_message_source()) + self._configuration = detector_configuration + # noinspection PyArgumentList + self._calibrator = intrinsic_calibrator_type( + configuration=intrinsic_calibrator_type.Configuration( + **detector_configuration.intrinsic_calibrator.configuration)) + # noinspection PyArgumentList self._camera = camera_type( - configuration=detector_configuration.camera_configuration, + configuration=camera_type.Configuration( + **detector_configuration.camera.configuration), status_message_source=self.get_status_message_source()) - self._marker = marker_type( - configuration=detector_configuration.marker_configuration, + # noinspection PyArgumentList + self._annotator = annotator_type( + configuration=annotator_type.Configuration( + **detector_configuration.annotator.configuration), status_message_source=self.get_status_message_source()) self._frame_count = 0 def __del__(self): self._camera.__del__() - def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | ErrorResponse: - request: CalibrationCalculateRequest = get_kwarg( + def annotator_parameters_get( + self, + **_kwargs + ) -> AnnotatorParametersGetResponse | ErrorResponse: + try: + parameters = self._annotator.get_parameters() + except MCTAnnotatorRuntimeError as e: + return 
ErrorResponse(message=e.message) + return AnnotatorParametersGetResponse(parameters=parameters) + + def annotator_parameters_set( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: AnnotatorParametersSetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=AnnotatorParametersSetRequest) + try: + self._annotator.set_parameters(parameters=request.parameters) + except MCTAnnotatorRuntimeError as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def calibration_calculate( + self, + **kwargs + ) -> IntrinsicCalibrationCalculateResponse | ErrorResponse: + request: IntrinsicCalibrationCalculateRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationCalculateRequest) + arg_type=IntrinsicCalibrationCalculateRequest) result_identifier: str intrinsic_calibration: IntrinsicCalibration try: - marker_parameters_kvm: list[KeyValueMetaAny] = self._marker.get_parameters() - marker_parameters_kvs: list[KeyValueSimpleAny] = key_value_meta_to_simple(marker_parameters_kvm) result_identifier, intrinsic_calibration = self._calibrator.calculate( - image_resolution=request.image_resolution, - marker_parameters=marker_parameters_kvs) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationCalculateResponse( + image_resolution=request.image_resolution) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationCalculateResponse( result_identifier=result_identifier, intrinsic_calibration=intrinsic_calibration) - def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def calibration_delete_staged( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._calibrator.delete_staged() - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return 
ErrorResponse(message=e.public_message) return EmptyResponse() - def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | ErrorResponse: + def calibration_image_add( + self, + **_kwargs + ) -> IntrinsicCalibrationImageAddResponse | ErrorResponse: try: - image_base64: str = self._camera.get_encoded_image(image_format=".png", requested_resolution=None) + image_base64: str + image_base64, _ = self._camera.get_encoded_image( + image_format=ImageFormat.FORMAT_PNG, + requested_resolution=None) image_identifier: str = self._calibrator.add_image(image_base64=image_base64) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationImageAddResponse(image_identifier=image_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationImageAddResponse(image_identifier=image_identifier) - def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | ErrorResponse: - request: CalibrationImageGetRequest = get_kwarg( + def calibration_image_get( + self, + **kwargs + ) -> IntrinsicCalibrationImageGetResponse | ErrorResponse: + request: IntrinsicCalibrationImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageGetRequest) + arg_type=IntrinsicCalibrationImageGetRequest) image_base64: str try: - image_base64 = self._calibrator.get_image(image_identifier=request.image_identifier) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationImageGetResponse(image_base64=image_base64) + image_base64 = self._calibrator.get_image_by_identifier(identifier=request.image_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationImageGetResponse(image_base64=image_base64) - def calibration_image_metadata_list(self, **kwargs) -> 
CalibrationImageMetadataListResponse | ErrorResponse: - request: CalibrationImageMetadataListRequest = get_kwarg( + def calibration_image_metadata_list( + self, + **kwargs + ) -> IntrinsicCalibrationImageMetadataListResponse | ErrorResponse: + request: IntrinsicCalibrationImageMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageMetadataListRequest) - image_metadata_list: list[CalibrationImageMetadata] + arg_type=IntrinsicCalibrationImageMetadataListRequest) + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] try: - image_metadata_list = self._calibrator.list_image_metadata( + image_metadata_list = self._calibrator.list_image_metadata_by_image_resolution( image_resolution=request.image_resolution) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationImageMetadataListResponse(metadata_list=image_metadata_list) - def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationImageMetadataUpdateRequest = get_kwarg( + def calibration_image_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: IntrinsicCalibrationImageMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageMetadataUpdateRequest) + arg_type=IntrinsicCalibrationImageMetadataUpdateRequest) try: self._calibrator.update_image_metadata( image_identifier=request.image_identifier, image_state=request.image_state, image_label=request.image_label) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return EmptyResponse() - def 
calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListResponse | ErrorResponse: + def calibration_resolution_list( + self, + **_kwargs + ) -> IntrinsicCalibrationResolutionListResponse | ErrorResponse: resolutions: list[ImageResolution] try: resolutions = self._calibrator.list_resolutions() - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationResolutionListResponse(resolutions=resolutions) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationResolutionListResponse(resolutions=resolutions) - def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | ErrorResponse: - request: CalibrationResultGetRequest = get_kwarg( + def calibration_result_get( + self, + **kwargs + ) -> IntrinsicCalibrationResultGetResponse | ErrorResponse: + request: IntrinsicCalibrationResultGetRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultGetRequest) + arg_type=IntrinsicCalibrationResultGetRequest) intrinsic_calibration: IntrinsicCalibration try: intrinsic_calibration = self._calibrator.get_result(result_identifier=request.result_identifier) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) - def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActiveResponse | ErrorResponse: + def calibration_result_get_active( + self, + **_kwargs + ) -> IntrinsicCalibrationResultGetActiveResponse | ErrorResponse: intrinsic_calibration: IntrinsicCalibration | None try: image_resolution: ImageResolution = self._camera.get_resolution() - intrinsic_calibration = 
self._calibrator.get_result_active(image_resolution=image_resolution) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) + intrinsic_calibration = self._calibrator.get_result_active_by_image_resolution(image_resolution=image_resolution) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) - def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadataListResponse | ErrorResponse: - request: CalibrationResultMetadataListRequest = get_kwarg( + def calibration_result_metadata_list( + self, + **kwargs + ) -> IntrinsicCalibrationResultMetadataListResponse | ErrorResponse: + request: IntrinsicCalibrationResultMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultMetadataListRequest) - result_metadata_list: list[CalibrationResultMetadata] + arg_type=IntrinsicCalibrationResultMetadataListRequest) + result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] try: - result_metadata_list = self._calibrator.list_result_metadata( + result_metadata_list = self._calibrator.list_result_metadata_by_image_resolution( image_resolution=request.image_resolution) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return IntrinsicCalibrationResultMetadataListResponse(metadata_list=result_metadata_list) - def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationResultMetadataUpdateRequest = get_kwarg( + def calibration_result_metadata_update( + self, + **kwargs + ) 
-> EmptyResponse | ErrorResponse: + request: IntrinsicCalibrationResultMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultMetadataUpdateRequest) + arg_type=IntrinsicCalibrationResultMetadataUpdateRequest) try: self._calibrator.update_result_metadata( result_identifier=request.result_identifier, result_state=request.result_state, result_label=request.result_label) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return EmptyResponse() - def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: - request: CameraImageGetRequest = get_kwarg( + def camera_image_get( + self, + **kwargs + ) -> CameraImageGetResponse | ErrorResponse: + request: CameraImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraImageGetRequest) encoded_image_base64: str + original_resolution: ImageResolution try: - encoded_image_base64 = self._camera.get_encoded_image( + encoded_image_base64, original_resolution = self._camera.get_encoded_image( image_format=request.format, requested_resolution=request.requested_resolution) - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraImageGetResponse( format=request.format, - image_base64=encoded_image_base64) + image_base64=encoded_image_base64, + original_resolution=original_resolution) - def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | ErrorResponse: + def camera_parameters_get( + self, + **_kwargs + ) -> CameraParametersGetResponse | ErrorResponse: parameters: list[KeyValueMetaAbstract] try: parameters = self._camera.get_parameters() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraParametersGetResponse(parameters=parameters) - def 
camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | ErrorResponse: - request: CameraParametersSetRequest = get_kwarg( + def camera_parameters_set( + self, + **kwargs + ) -> CameraParametersSetResponse | ErrorResponse: + request: CameraParametersSetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraParametersSetRequest) @@ -264,107 +359,111 @@ def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | Error try: self._camera.set_parameters(parameters=request.parameters) new_resolution = self._camera.get_resolution() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraParametersSetResponse(resolution=new_resolution) - def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | ErrorResponse: + def camera_resolution_get( + self, + **_kwargs + ) -> CameraResolutionGetResponse | ErrorResponse: image_resolution: ImageResolution try: image_resolution = self._camera.get_resolution() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraResolutionGetResponse(resolution=image_resolution) - def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorResponse: - request: DetectorFrameGetRequest = get_kwarg( + def detector_frame_get( + self, + **kwargs + ) -> DetectorFrameGetResponse | ErrorResponse: + request: DetectorFrameGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=DetectorFrameGetRequest) detector_frame: DetectorFrame try: detector_frame = DetectorFrame( - detected_marker_snapshots=list(), - rejected_marker_snapshots=list(), - timestamp_utc_iso8601=self._marker.get_changed_timestamp().isoformat(), + annotations=list(), + timestamp_utc_iso8601=self._annotator.get_changed_timestamp().isoformat(), image_resolution=self._camera.get_resolution()) if request.include_detected: - detector_frame.detected_marker_snapshots = 
self._marker.get_markers_detected() + detector_frame.annotations += self._annotator.get_markers_detected() if request.include_rejected: - detector_frame.rejected_marker_snapshots = self._marker.get_markers_rejected() - except MCTDetectorRuntimeError as e: + detector_frame.annotations += self._annotator.get_markers_rejected() + except (MCTCameraRuntimeError, MCTAnnotatorRuntimeError) as e: return ErrorResponse(message=e.message) return DetectorFrameGetResponse(frame=detector_frame) - def detector_start(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def detector_start( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._camera.start() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() - def detector_stop(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def detector_stop( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._camera.stop() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() - def marker_parameters_get(self, **_kwargs) -> MarkerParametersGetResponse | ErrorResponse: - try: - parameters = self._marker.get_parameters() - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return MarkerParametersGetResponse(parameters=parameters) + @staticmethod + def get_role_label(): + return _ROLE_LABEL - def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: MarkerParametersSetRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=MarkerParametersSetRequest) - try: - self._marker.set_parameters(parameters=request.parameters) - except MCTDetectorRuntimeError as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return_value: dict[type[MCTRequest], Callable[[dict], 
MCTResponse]] = super().supported_request_types() + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_methods() return_value.update({ DetectorFrameGetRequest: self.detector_frame_get, DetectorStartRequest: self.detector_start, DetectorStopRequest: self.detector_stop, - CalibrationCalculateRequest: self.calibration_calculate, - CalibrationDeleteStagedRequest: self.calibration_delete_staged, - CalibrationImageAddRequest: self.calibration_image_add, - CalibrationImageGetRequest: self.calibration_image_get, - CalibrationImageMetadataListRequest: self.calibration_image_metadata_list, - CalibrationImageMetadataUpdateRequest: self.calibration_image_metadata_update, - CalibrationResolutionListRequest: self.calibration_resolution_list, - CalibrationResultGetRequest: self.calibration_result_get, - CalibrationResultGetActiveRequest: self.calibration_result_get_active, - CalibrationResultMetadataListRequest: self.calibration_result_metadata_list, - CalibrationResultMetadataUpdateRequest: self.calibration_result_metadata_update, + IntrinsicCalibrationCalculateRequest: self.calibration_calculate, + IntrinsicCalibrationDeleteStagedRequest: self.calibration_delete_staged, + IntrinsicCalibrationImageAddRequest: self.calibration_image_add, + IntrinsicCalibrationImageGetRequest: self.calibration_image_get, + IntrinsicCalibrationImageMetadataListRequest: self.calibration_image_metadata_list, + IntrinsicCalibrationImageMetadataUpdateRequest: self.calibration_image_metadata_update, + IntrinsicCalibrationResolutionListRequest: self.calibration_resolution_list, + IntrinsicCalibrationResultGetRequest: self.calibration_result_get, + IntrinsicCalibrationResultGetActiveRequest: self.calibration_result_get_active, + IntrinsicCalibrationResultMetadataListRequest: self.calibration_result_metadata_list, + IntrinsicCalibrationResultMetadataUpdateRequest: 
self.calibration_result_metadata_update, CameraImageGetRequest: self.camera_image_get, CameraParametersGetRequest: self.camera_parameters_get, CameraParametersSetRequest: self.camera_parameters_set, CameraResolutionGetRequest: self.camera_resolution_get, - MarkerParametersGetRequest: self.marker_parameters_get, - MarkerParametersSetRequest: self.marker_parameters_set}) + AnnotatorParametersGetRequest: self.annotator_parameters_get, + AnnotatorParametersSetRequest: self.annotator_parameters_set}) return return_value async def update(self): if self.time_sync_active: return - if self._camera.get_status() == CameraStatus.RUNNING: + if self._camera.get_status() == Camera.Status.RUNNING: try: self._camera.update() - except MCTDetectorRuntimeError as e: - self.add_status_message(severity="error", message=e.message) - if self._marker.get_status() == MarkerStatus.RUNNING and \ - self._camera.get_changed_timestamp() > self._marker.get_changed_timestamp(): - self._marker.update(self._camera.get_image()) - self._frame_count += 1 - if self._frame_count % 1000 == 0: - print(f"Update count: {self._frame_count}") + except MCTCameraRuntimeError as e: + self.add_status_message( + severity=SeverityLabel.ERROR, + message=f"Exception occurred in Camera update: {e.message}") + if self._annotator.get_status() == Annotator.Status.RUNNING and \ + self._camera.get_changed_timestamp() > self._annotator.get_changed_timestamp(): + try: + self._annotator.update(self._camera.get_image()) + except MCTAnnotatorRuntimeError as e: + self.add_status_message( + severity=SeverityLabel.ERROR, + message=f"Exception occurred in Annotator update: {e.message}") + # self._frame_count += 1 + # if self._frame_count % 1000 == 0: + # print(f"Update count: {self._frame_count}") diff --git a/src/detector/exceptions.py b/src/detector/exceptions.py deleted file mode 100644 index 06a2c84..0000000 --- a/src/detector/exceptions.py +++ /dev/null @@ -1,9 +0,0 @@ -from src.common.exceptions import MCTError - - -class 
MCTDetectorRuntimeError(MCTError): - message: str - - def __init__(self, *args, message: str): - super().__init__(*args) - self.message = message diff --git a/src/detector/implementations/marker_aruco_opencv.py b/src/detector/implementations/marker_aruco_opencv.py deleted file mode 100644 index 61c2693..0000000 --- a/src/detector/implementations/marker_aruco_opencv.py +++ /dev/null @@ -1,150 +0,0 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractMarker -from ..structures import \ - MarkerConfiguration, \ - MarkerStatus -from ..util import \ - assign_aruco_detection_parameters_to_key_value_list, \ - assign_key_value_list_to_aruco_detection_parameters -from src.common import StatusMessageSource -from src.common.structures import \ - KeyValueMetaAny, \ - KeyValueSimpleAny, \ - MarkerCornerImagePoint, \ - MarkerSnapshot -import cv2.aruco -import datetime -import logging -import numpy -from typing import Any - - -logger = logging.getLogger(__name__) - - -# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html -# for documentation on individual parameters - - -class ArucoOpenCVMarker(AbstractMarker): - - _marker_dictionary: Any | None # created by OpenCV, type cv2.aruco.Dictionary - _marker_parameters: Any # created by OpenCV, type cv2.aruco.DetectorParameters - _marker_label_reverse_dictionary: dict[int, str] - _marker_detected_snapshots: list[MarkerSnapshot] - _marker_rejected_snapshots: list[MarkerSnapshot] - _marker_timestamp_utc: datetime.datetime - - def __init__( - self, - configuration: MarkerConfiguration, - status_message_source: StatusMessageSource - ): - super().__init__( - configuration=configuration, - status_message_source=status_message_source) - - self._marker_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - self._marker_parameters = cv2.aruco.DetectorParameters_create() - self._marker_label_reverse_dictionary = dict() - self._marker_detected_snapshots = list() # Markers that 
are determined to be valid, and are identified - self._marker_rejected_snapshots = list() # Things that looked at first like markers but got later filtered out - self._marker_timestamp_utc = datetime.datetime.min - self.set_status(MarkerStatus.RUNNING) # Always running - - def get_changed_timestamp(self) -> datetime.datetime: - return self._marker_timestamp_utc - - def get_markers_detected(self) -> list[MarkerSnapshot]: - return self._marker_detected_snapshots - - def get_markers_rejected(self) -> list[MarkerSnapshot]: - return self._marker_rejected_snapshots - - def get_parameters(self) -> list[KeyValueMetaAny]: - return assign_aruco_detection_parameters_to_key_value_list(self._marker_parameters) - - @staticmethod - def get_type_identifier() -> str: - return "aruco_opencv" - - @staticmethod - def _marker_corner_image_point_list_from_embedded_list( - corner_image_points_px: list[list[float]] - ) -> list[MarkerCornerImagePoint]: - corner_image_point_list: list[MarkerCornerImagePoint] = list() - assert len(corner_image_points_px) == 4 - for corner_image_point_px in corner_image_points_px: - corner_image_point_list.append(MarkerCornerImagePoint( - x_px=corner_image_point_px[0], - y_px=corner_image_point_px[1])) - return corner_image_point_list - - # noinspection DuplicatedCode - def set_parameters( - self, - parameters: list[KeyValueSimpleAny] - ) -> None: - mismatched_keys: list[str] = assign_key_value_list_to_aruco_detection_parameters( - detection_parameters=self._marker_parameters, - key_value_list=parameters) - if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( - message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - - def update( - self, - image: numpy.ndarray - ) -> None: - if self._marker_dictionary is None: - message: str = "No marker dictionary has been set." 
- self.add_status_message(severity="error", message=message) - self.set_status(MarkerStatus.FAILURE) - return - - image_greyscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( - image=image_greyscale, - dictionary=self._marker_dictionary, - parameters=self._marker_parameters) - - self._marker_detected_snapshots = list() - # note: detected_indices is (inconsistently) None sometimes if no markers are detected - if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: - detected_marker_count = detected_dictionary_indices.size - # Shape of some output was previously observed to (also) be inconsistent... make it consistent here: - detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_marker_count, 4, 2)) - detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_marker_count)) - for detected_marker_index, detected_marker_id in enumerate(detected_dictionary_indices): - if False: # TODO: Re-enable - if detected_marker_id not in self._marker_label_reverse_dictionary: - message: str = \ - f"Found a marker with index {detected_marker_id} "\ - "but it does not appear in the dictionary." 
- self.add_status_message(severity="error", message=message) - self.set_status(MarkerStatus.FAILURE) - return - marker_label: str = self._marker_label_reverse_dictionary[detected_marker_id] - else: - marker_label: str = str(detected_marker_id) - corner_image_points_px = detected_corner_points_px[detected_marker_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._marker_corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._marker_detected_snapshots.append(MarkerSnapshot( - label=marker_label, - corner_image_points=corner_image_points)) - - self._marker_rejected_snapshots = list() - if rejected_corner_points_raw: - rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) - for rejected_marker_index in range(rejected_corner_points_px.shape[0]): - corner_image_points_px = rejected_corner_points_px[rejected_marker_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._marker_corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._marker_rejected_snapshots.append(MarkerSnapshot( - label=f"unknown", - corner_image_points=corner_image_points)) - - self._marker_timestamp_utc = datetime.datetime.utcnow() diff --git a/src/detector/interfaces/__init__.py b/src/detector/interfaces/__init__.py deleted file mode 100644 index 88c4fb9..0000000 --- a/src/detector/interfaces/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .abstract_camera import AbstractCamera -from .abstract_marker import AbstractMarker diff --git a/src/detector/structures/__init__.py b/src/detector/structures/__init__.py deleted file mode 100644 index 19efb74..0000000 --- a/src/detector/structures/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .calibration_configuration import CalibratorConfiguration -from .calibration_image_metadata import CalibrationImageMetadata -from .calibration_image_state import CalibrationImageState -from 
.calibration_map import CalibrationMap -from .calibration_map_entry import CalibrationMapEntry -from .calibration_map_value import CalibrationMapValue -from .calibration_result_metadata import CalibrationResultMetadata -from .calibration_result_state import CalibrationResultState -from .camera_configuration import CameraConfiguration -from .camera_status import CameraStatus -from .detector_configuration import DetectorConfiguration -from .marker_status import MarkerStatus -from .marker_configuration import MarkerConfiguration diff --git a/src/detector/structures/calibration_configuration.py b/src/detector/structures/calibration_configuration.py deleted file mode 100644 index 95bb8dd..0000000 --- a/src/detector/structures/calibration_configuration.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import BaseModel, Field - - -class CalibratorConfiguration(BaseModel): - data_path: str = Field() diff --git a/src/detector/structures/calibration_image_metadata.py b/src/detector/structures/calibration_image_metadata.py deleted file mode 100644 index dc524a7..0000000 --- a/src/detector/structures/calibration_image_metadata.py +++ /dev/null @@ -1,10 +0,0 @@ -from .calibration_image_state import CalibrationImageState -import datetime -from pydantic import BaseModel, Field - - -class CalibrationImageMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc: str = Field(default_factory=lambda: str(datetime.datetime.utcnow())) - state: CalibrationImageState = Field(default=CalibrationImageState.SELECT) diff --git a/src/detector/structures/calibration_image_state.py b/src/detector/structures/calibration_image_state.py deleted file mode 100644 index 58218e1..0000000 --- a/src/detector/structures/calibration_image_state.py +++ /dev/null @@ -1,8 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class CalibrationImageState(StrEnum): - IGNORE: Final[int] = "ignore" - SELECT: Final[int] = "select" - DELETE: Final[int] = 
"delete" # stage for deletion diff --git a/src/detector/structures/calibration_map.py b/src/detector/structures/calibration_map.py deleted file mode 100644 index 4e96e02..0000000 --- a/src/detector/structures/calibration_map.py +++ /dev/null @@ -1,26 +0,0 @@ -from .calibration_map_entry import CalibrationMapEntry -from .calibration_map_value import CalibrationMapValue -from src.common.structures import ImageResolution -from pydantic import BaseModel, Field - - -class CalibrationMap(BaseModel): - entries: list[CalibrationMapEntry] = Field(default_factory=list) - - def as_dict(self) -> dict[ImageResolution, CalibrationMapValue]: - return_value: dict[ImageResolution, CalibrationMapValue] = dict() - for entry in self.entries: - if entry.key not in return_value: - return_value[entry.key] = CalibrationMapValue() - for image_metadata in entry.value.image_metadata_list: - return_value[entry.key].image_metadata_list.append(image_metadata) - for result_metadata in entry.value.result_metadata_list: - return_value[entry.key].result_metadata_list.append(result_metadata) - return return_value - - @staticmethod - def from_dict(in_dict: dict[ImageResolution, CalibrationMapValue]): - entries: list[CalibrationMapEntry] = list() - for key in in_dict.keys(): - entries.append(CalibrationMapEntry(key=key, value=in_dict[key])) - return CalibrationMap(entries=entries) diff --git a/src/detector/structures/calibration_map_entry.py b/src/detector/structures/calibration_map_entry.py deleted file mode 100644 index b04c1af..0000000 --- a/src/detector/structures/calibration_map_entry.py +++ /dev/null @@ -1,8 +0,0 @@ -from .calibration_map_value import CalibrationMapValue -from src.common.structures import ImageResolution -from pydantic import BaseModel, Field - - -class CalibrationMapEntry(BaseModel): - key: ImageResolution = Field() - value: CalibrationMapValue = Field() diff --git a/src/detector/structures/calibration_map_value.py b/src/detector/structures/calibration_map_value.py deleted file 
mode 100644 index 77159fd..0000000 --- a/src/detector/structures/calibration_map_value.py +++ /dev/null @@ -1,8 +0,0 @@ -from .calibration_image_metadata import CalibrationImageMetadata -from .calibration_result_metadata import CalibrationResultMetadata -from pydantic import BaseModel, Field - - -class CalibrationMapValue(BaseModel): - image_metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) - result_metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) diff --git a/src/detector/structures/calibration_result_metadata.py b/src/detector/structures/calibration_result_metadata.py deleted file mode 100644 index 605b989..0000000 --- a/src/detector/structures/calibration_result_metadata.py +++ /dev/null @@ -1,14 +0,0 @@ -from .calibration_result_state import CalibrationResultState -import datetime -from pydantic import BaseModel, Field - - -class CalibrationResultMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc_iso8601: str = Field(default_factory=lambda: str(datetime.datetime.utcnow())) - image_identifiers: list[str] = Field(default_factory=list) - state: CalibrationResultState = Field(default=CalibrationResultState.RETAIN) - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/detector/structures/calibration_result_state.py b/src/detector/structures/calibration_result_state.py deleted file mode 100644 index 84684fa..0000000 --- a/src/detector/structures/calibration_result_state.py +++ /dev/null @@ -1,14 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class CalibrationResultState(StrEnum): - # indicate to use this calibration (as opposed to simply storing it) - # normally there shall only ever be one ACTIVE calibration for a given image resolution - ACTIVE: Final[str] = "active" - - # store the calibration, but don't mark it for use - RETAIN: Final[str] = "retain" - - # stage for deletion - 
DELETE: Final[str] = "delete" diff --git a/src/detector/structures/camera_configuration.py b/src/detector/structures/camera_configuration.py deleted file mode 100644 index 453fb80..0000000 --- a/src/detector/structures/camera_configuration.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Union - - -class CameraConfiguration(BaseModel): - driver: str = Field() - capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) diff --git a/src/detector/structures/camera_status.py b/src/detector/structures/camera_status.py deleted file mode 100644 index 5cd6c87..0000000 --- a/src/detector/structures/camera_status.py +++ /dev/null @@ -1,11 +0,0 @@ -from typing import Final -from enum import StrEnum - - -class CameraStatus(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" - - def in_runnable_state(self): - return self == CameraStatus.RUNNING diff --git a/src/detector/structures/detector_configuration.py b/src/detector/structures/detector_configuration.py deleted file mode 100644 index d55845c..0000000 --- a/src/detector/structures/detector_configuration.py +++ /dev/null @@ -1,13 +0,0 @@ -from .calibration_configuration import CalibratorConfiguration -from .camera_configuration import CameraConfiguration -from .marker_configuration import MarkerConfiguration -from pydantic import BaseModel, Field - - -class DetectorConfiguration(BaseModel): - """ - Top-level schema for Detector initialization data - """ - calibrator_configuration: CalibratorConfiguration = Field() - camera_configuration: CameraConfiguration = Field() - marker_configuration: MarkerConfiguration = Field() diff --git a/src/detector/structures/marker_configuration.py b/src/detector/structures/marker_configuration.py deleted file mode 100644 index 908afd1..0000000 --- a/src/detector/structures/marker_configuration.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import 
BaseModel, Field - - -class MarkerConfiguration(BaseModel): - method: str = Field() diff --git a/src/detector/structures/marker_status.py b/src/detector/structures/marker_status.py deleted file mode 100644 index 9387970..0000000 --- a/src/detector/structures/marker_status.py +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Final -from enum import StrEnum - - -class MarkerStatus(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" diff --git a/src/detector/util.py b/src/detector/util.py deleted file mode 100644 index d86a3a5..0000000 --- a/src/detector/util.py +++ /dev/null @@ -1,467 +0,0 @@ -from .exceptions import MCTDetectorRuntimeError -from src.common.structures import \ - CornerRefinementMethod, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ - KeyValueMetaAny, \ - KeyValueMetaBool, \ - KeyValueMetaEnum, \ - KeyValueMetaFloat, \ - KeyValueMetaInt, \ - KeyValueSimpleAbstract, \ - KeyValueSimpleAny, \ - KeyValueSimpleBool, \ - KeyValueSimpleFloat, \ - KeyValueSimpleInt, \ - KeyValueSimpleString -import logging -import numpy -from typing import Final, get_args - - -logger = logging.getLogger(__name__) - - -# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html -# for documentation on individual parameters - -# Adaptive Thresholding -KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" -KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" -KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" -KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" -# Contour Filtering -KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio -KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" -KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square tolerance ratio 
-KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio -KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio -KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels -# Bits Extraction -KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (px) -KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev -KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate -KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio -# Marker Identification -KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate -KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rat -KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" -KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" -KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" -KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" -KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" -# April Tag Only -KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" -KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" -KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" -KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" -KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" -KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" -KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" -KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" -# ArUco 3 -KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" -KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = "minMarkerLengthRatioOriginalImg" 
-KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" - - -def assign_aruco_detection_parameters_to_key_value_list( - detection_parameters: ... # cv2.aruco.DetectionParameters -) -> list[KeyValueMetaAny]: - - return_value: list[KeyValueMetaAny] = list() - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, - value=detection_parameters.adaptiveThreshWinSizeMin, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, - value=detection_parameters.adaptiveThreshWinSizeMax, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, - value=detection_parameters.adaptiveThreshWinSizeStep, - range_minimum=1, - range_maximum=99, - range_step=2)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ADAPTIVE_THRESH_CONSTANT, - value=detection_parameters.adaptiveThreshConstant, - range_minimum=-255.0, - range_maximum=255.0, - range_step=1.0)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_PERIMETER_RATE, - value=detection_parameters.minMarkerPerimeterRate, - range_minimum=0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_MARKER_PERIMETER_RATE, - value=detection_parameters.maxMarkerPerimeterRate, - range_minimum=0.0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_POLYGONAL_APPROX_ACCURACY_RATE, - value=detection_parameters.polygonalApproxAccuracyRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_CORNER_DISTANCE_RATE, - value=detection_parameters.minCornerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_DISTANCE_RATE, - value=detection_parameters.minMarkerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) 
- - return_value.append(KeyValueMetaInt( - key=KEY_MIN_DISTANCE_TO_BORDER, - value=detection_parameters.minDistanceToBorder, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_MARKER_BORDER_BITS, - value=detection_parameters.markerBorderBits, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_OTSU_STDDEV, - value=detection_parameters.minOtsuStdDev, - range_minimum=0.0, - range_maximum=256.0, - range_step=1.0)) - - return_value.append(KeyValueMetaInt( - key=KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, - value=detection_parameters.perspectiveRemovePixelPerCell, - range_minimum=1, - range_maximum=20)) - - return_value.append(KeyValueMetaFloat( - key=KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, - value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, - range_minimum=0.0, - range_maximum=0.5, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, - value=detection_parameters.maxErroneousBitsInBorderRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ERROR_CORRECTION_RATE, - value=detection_parameters.errorCorrectionRate, - range_minimum=-0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaBool( - key=KEY_DETECT_INVERTED_MARKER, - value=detection_parameters.detectInvertedMarker)) - - if detection_parameters.cornerRefinementMethod not in CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: - message: str = f"Corner refinement method appears to be set to an invalid value: " \ - f"{detection_parameters.corner_refinement_method}." 
- logger.error(message) - raise MCTDetectorRuntimeError(message=message) - corner_refinement_method_text: CornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[detection_parameters.cornerRefinementMethod] - return_value.append(KeyValueMetaEnum( - key=KEY_CORNER_REFINEMENT_METHOD, - value=corner_refinement_method_text, - allowable_values=get_args(CornerRefinementMethod))) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_WIN_SIZE, - value=detection_parameters.cornerRefinementWinSize, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_MAX_ITERATIONS, - value=detection_parameters.cornerRefinementMaxIterations, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaFloat( - key=KEY_CORNER_REFINEMENT_MIN_ACCURACY, - value=detection_parameters.cornerRefinementMinAccuracy, - range_minimum=0.0, - range_maximum=5.0, - range_step=0.1)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_CRITICAL_RAD, - value=detection_parameters.aprilTagCriticalRad, - range_minimum=-0.0, - range_maximum=numpy.pi, - range_step=numpy.pi / 20.0)) - - return_value.append(KeyValueMetaBool( - key=KEY_APRIL_TAG_DEGLITCH, - value=detection_parameters.aprilTagDeglitch)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_MAX_LINE_FIT_MSE, - value=detection_parameters.aprilTagMaxLineFitMse, - range_minimum=0.0, - range_maximum=512.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MAX_N_MAXIMA, - value=detection_parameters.aprilTagMaxNmaxima, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, - value=detection_parameters.aprilTagMinClusterPixels, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, - value=detection_parameters.aprilTagMinWhiteBlackDiff, - range_minimum=0, - 
range_maximum=256)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_DECIMATE, - value=detection_parameters.aprilTagQuadDecimate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_SIGMA, - value=detection_parameters.aprilTagQuadSigma, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - # Note: a relatively recent addition to OpenCV, may not be available in some python versions - if hasattr(detection_parameters, "useAruco3Detection"): - return_value.append(KeyValueMetaBool( - key=KEY_USE_ARUCO_3_DETECTION, - value=detection_parameters.useAruco3Detection)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, - value=detection_parameters.minMarkerLengthRatioOriginalImg, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, - value=detection_parameters.minSideLengthCanonicalImg, - range_minimum=1, - range_maximum=512)) - - return return_value - - -def assign_key_value_list_to_aruco_detection_parameters( - detection_parameters: ..., # cv2.aruco.DetectionParameters - key_value_list: list[KeyValueSimpleAny] -) -> list[str]: - """ - Returns list of mismatched keys - """ - mismatched_keys: list[str] = list() - key_value: KeyValueSimpleAbstract - for key_value in key_value_list: - if key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMin = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMax = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: - if not isinstance(key_value, KeyValueSimpleInt): - 
mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeStep = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_CONSTANT: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshConstant = key_value.value - elif key_value.key == KEY_MIN_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_MAX_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_POLYGONAL_APPROX_ACCURACY_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.polygonalApproxAccuracyRate = key_value.value - elif key_value.key == KEY_MIN_CORNER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minCornerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_MARKER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_DISTANCE_TO_BORDER: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minDistanceToBorder = key_value.value - elif key_value.key == KEY_MARKER_BORDER_BITS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.markerBorderBits = key_value.value - elif key_value.key == KEY_MIN_OTSU_STDDEV: - if not isinstance(key_value, KeyValueSimpleFloat): - 
mismatched_keys.append(key_value.key) - continue - detection_parameters.minOtsuStdDev = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemovePixelPerCell = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value - elif key_value.key == KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxErroneousBitsInBorderRate = key_value.value - elif key_value.key == KEY_ERROR_CORRECTION_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.errorCorrectionRate = key_value.value - elif key_value.key == KEY_DETECT_INVERTED_MARKER: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.detectInvertedMarker = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_METHOD: - if not isinstance(key_value, KeyValueSimpleString): - mismatched_keys.append(key_value.key) - continue - corner_refinement_method: str = key_value.value - if corner_refinement_method in CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: - # noinspection PyTypeChecker - detection_parameters.cornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[corner_refinement_method] - else: - raise MCTDetectorRuntimeError( - message=f"Failed to find corner refinement method {corner_refinement_method}.") - elif key_value.key == KEY_CORNER_REFINEMENT_WIN_SIZE: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - 
detection_parameters.cornerRefinementWinSize = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MAX_ITERATIONS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMaxIterations = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MIN_ACCURACY: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMinAccuracy = key_value.value - elif key_value.key == KEY_APRIL_TAG_CRITICAL_RAD: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagCriticalRad = key_value.value - elif key_value.key == KEY_APRIL_TAG_DEGLITCH: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagDeglitch = int(key_value.value) - elif key_value.key == KEY_APRIL_TAG_MAX_LINE_FIT_MSE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxLineFitMse = key_value.value - elif key_value.key == KEY_APRIL_TAG_MAX_N_MAXIMA: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxNmaxima = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinClusterPixels = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_DECIMATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) 
- continue - detection_parameters.aprilTagQuadDecimate = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_SIGMA: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagQuadSigma = key_value.value - elif key_value.key == KEY_USE_ARUCO_3_DETECTION: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.useAruco3Detection = key_value.value - elif key_value.key == KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value - elif key_value.key == KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minSideLengthCanonicalImg = key_value.value - else: - mismatched_keys.append(key_value.key) - return mismatched_keys diff --git a/src/gui/gui.py b/src/gui/gui.py index 6e3e79b..23cdda9 100644 --- a/src/gui/gui.py +++ b/src/gui/gui.py @@ -1,11 +1,12 @@ from src.gui.panels import \ BasePanel, \ BoardBuilderPanel, \ - CalibratorPanel, \ + IntrinsicsPanel, \ + ExtrinsicsPanel, \ ControllerPanel, \ DetectorPanel, \ PoseSolverPanel -from src.common import StatusMessageSource +from src.common import SeverityLabel, StatusMessageSource from src.controller import MCTController import logging import wx @@ -14,7 +15,8 @@ CONTROLLER_LABEL: Final[str] = "Controller" DETECTOR_LABEL: Final[str] = "Detector" -CALIBRATOR_LABEL: Final[str] = "Calibrator" +INTRINSIC_CALIBRATOR_LABEL: Final[str] = "Intrinsic Calibrator" +EXTRINSIC_CALIBRATOR_LABEL: Final[str] = "Extrinsic Calibrator" BOARD_BUILDER_LABEL: Final[str] = "Board Builder" POSE_SOLVER_LABEL: Final[str] = "Pose Solver" @@ -28,7 +30,8 @@ class ControllerFrame(wx.Frame): _notebook: wx.Notebook _controller_panel: 
ControllerPanel _detector_panel: DetectorPanel - _calibrator_panel: CalibratorPanel + _intrinsics_panel: IntrinsicsPanel + _extrinsics_panel: ExtrinsicsPanel _board_builder_panel: BoardBuilderPanel _pose_solver_panel: PoseSolverPanel @@ -74,13 +77,22 @@ def __init__( text=DETECTOR_LABEL, select=False) - self._calibrator_panel = CalibratorPanel( + self._intrinsics_panel = IntrinsicsPanel( parent=self._notebook, controller=self._controller, status_message_source=self._status_message_source) self._notebook.AddPage( - page=self._calibrator_panel, - text=CALIBRATOR_LABEL, + page=self._intrinsics_panel, + text=INTRINSIC_CALIBRATOR_LABEL, + select=False) + + self._extrinsics_panel = ExtrinsicsPanel( + parent=self._notebook, + controller=self._controller, + status_message_source=self._status_message_source) + self._notebook.AddPage( + page=self._extrinsics_panel, + text=EXTRINSIC_CALIBRATOR_LABEL, select=False) self._board_builder_panel = BoardBuilderPanel( @@ -116,7 +128,8 @@ def on_page_changed(self, event: wx.BookCtrlEvent): pages: list[BasePanel] = [ self._controller_panel, self._detector_panel, - self._calibrator_panel, + self._intrinsics_panel, + self._extrinsics_panel, self._board_builder_panel, self._pose_solver_panel] for page in pages: @@ -136,7 +149,7 @@ def update(self, *_args): self._controller.update() except Exception as e: self._controller.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Exception occurred in controller loop: {str(e)}") diff --git a/src/gui/panels/__init__.py b/src/gui/panels/__init__.py index 8979663..0b40839 100644 --- a/src/gui/panels/__init__.py +++ b/src/gui/panels/__init__.py @@ -1,6 +1,7 @@ from .base_panel import BasePanel from .board_builder_panel import BoardBuilderPanel -from .calibrator_panel import CalibratorPanel +from .intrinsics_panel import IntrinsicsPanel +from .extrinsics_panel import ExtrinsicsPanel from .controller_panel import ControllerPanel from .detector_panel import DetectorPanel 
from .pose_solver_panel import PoseSolverPanel diff --git a/src/gui/panels/base_panel.py b/src/gui/panels/base_panel.py index e335342..8727b3b 100644 --- a/src/gui/panels/base_panel.py +++ b/src/gui/panels/base_panel.py @@ -7,9 +7,6 @@ ParameterText from src.common import \ ErrorResponse, \ - MCTResponse, \ - StatusMessageSource -from src.common.structures import \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ KeyValueSimpleBool, \ @@ -21,7 +18,10 @@ KeyValueMetaBool, \ KeyValueMetaEnum, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTResponse, \ + SeverityLabel, \ + StatusMessageSource from typing import Final import wx @@ -69,7 +69,7 @@ def populate_key_value_list_from_dynamic_ui( parameter_type = KeyValueSimpleInt else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to determine parameter type from UI element for key {label}.") continue key_values.append(parameter_type( @@ -120,7 +120,7 @@ def populate_dynamic_ui_from_key_value_list( step_value=key_value.range_step)) else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Unsupported parameter type {key_value.parsable_type} will not be handled") return return_value @@ -129,7 +129,7 @@ def handle_error_response( response: ErrorResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received error: {response.message}") def handle_unknown_response( @@ -137,12 +137,12 @@ def handle_unknown_response( response: MCTResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received unexpected response: {str(type(response))}") def on_page_select(self): self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message=f"{self.GetName()} on_page_select") self.panel_is_selected = True if not 
self._update_loop_running: @@ -151,7 +151,7 @@ def on_page_select(self): def on_page_deselect(self): self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message=f"{self.GetName()} on_page_deselect") self.panel_is_selected = False diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index 021c64f..1b5f8a4 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -1,46 +1,45 @@ -from io import BytesIO -import platform -import uuid +from .base_panel import BasePanel +from .feedback import ImagePanel +from .parameters import \ + ParameterSpinboxFloat, \ + ParameterCheckbox, \ + ParameterText +from .pose_solver_panel import POSE_REPRESENTATIVE_MODEL +from .specialized import GraphicsRenderer +from src.board_builder import BoardBuilder +from src.common.api import \ + EmptyResponse, \ + ErrorResponse, \ + MCTRequestSeries, \ + MCTResponse, \ + MCTResponseSeries +from src.common import \ + Annotation, \ + DetectorFrame, \ + ImageResolution, \ + ImageUtils, \ + Matrix4x4, \ + Pose, \ + MixerFrame, \ + StatusMessageSource +from src.controller import MCTController +from src.detector.api import \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + IntrinsicCalibrationResultGetActiveResponse +from src.gui.panels.detector_panel import _CAPTURE_FORMAT import cv2 +import datetime +from io import BytesIO import logging -from typing import Final import numpy +import os +import platform +from typing import Final +import uuid import wx import wx.grid -from cv2 import aruco -import datetime -import json -import os - -from src.common.api.empty_response import EmptyResponse -from src.common.api.error_response import ErrorResponse -from src.common.api.mct_request_series import MCTRequestSeries -from src.common.api.mct_response import MCTResponse -from src.common.api.mct_response_series import MCTResponseSeries -from src.common.image_coding import 
ImageCoding -from src.common.image_utils import ImageUtils -from src.common.standard_resolutions import StandardResolutions -from src.common.structures.detector_frame import DetectorFrame -from src.common.structures.image_resolution import ImageResolution -from src.common.structures.marker_snapshot import MarkerSnapshot -from src.detector.api import CameraImageGetRequest, CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse -from src.detector.api import CameraImageGetResponse -from src.gui.panels.detector_panel import _CAPTURE_FORMAT - -from .base_panel import BasePanel -from .feedback import ImagePanel -from .parameters import ParameterSpinboxFloat, ParameterCheckbox, ParameterText -from src.board_builder import BoardBuilder -from src.common.structures import PoseSolverFrame, Pose, Matrix4x4 -from src.controller import MCTController -from src.common import ( - StatusMessageSource -) -from .pose_solver_panel import POSE_REPRESENTATIVE_MODEL -from .specialized import \ - GraphicsRenderer logger = logging.getLogger(__name__) @@ -56,9 +55,9 @@ class LiveDetectorPreview(BasePanel): image: str def __init__( - self, - detector_label: str, - image_panel: ImagePanel + self, + detector_label: str, + image_panel: ImagePanel ): self.detector_label = detector_label self.image_panel = image_panel @@ -79,11 +78,11 @@ def __init__( _build_board_button: wx.Button _repeatability_testing_checkbox: ParameterCheckbox _reset_button: wx.Button - _live_markers_detected: list[MarkerSnapshot] + _live_markers_detected: list[Annotation] _tracked_target_poses: list[Pose] # This could maybe be added to the LiveDetectorPreview class - _latest_pose_solver_frames: dict[str, PoseSolverFrame] + _latest_pose_solver_frames: dict[str, MixerFrame] live_detector_previews: list[LiveDetectorPreview] def __init__( @@ -346,7 +345,7 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, 
CalibrationResultGetActiveResponse): + if isinstance(response, IntrinsicCalibrationResultGetActiveResponse): self._handle_calibration_result_get_active_response( response=response, responder=response_series.responder) @@ -366,7 +365,7 @@ def update_loop(self) -> None: self._renderer.render() if self._controller.is_running(): - detector_data: dict[str, list[MarkerSnapshot]] = {} + detector_data: dict[str, list[Annotation]] = {} should_refresh = False # Add this flag for preview in self.live_detector_previews: @@ -387,7 +386,7 @@ def update_loop(self) -> None: self.handle_response_series(response_series) should_refresh = True # Only refresh when new data is received - detector_data[detector_label] = preview.detector_frame.detected_marker_snapshots + detector_data[detector_label] = preview.detector_frame.annotations_identified if detector_data: self._run_board_builder(detector_data) @@ -406,12 +405,12 @@ def _begin_capture_snapshot(self, preview: LiveDetectorPreview): connection_label=preview.detector_label, request_series=request_series) - def _draw_all_corners(self, detected_marker_snapshots, scale, frame, color): + def _draw_all_corners(self, annotations, scale, frame, color): """ Takes in a dictionary of marker UUIDs to their corners and draws each set of corners on the frame with different colors. 
""" - corners = self._marker_snapshot_list_to_opencv_points(detected_marker_snapshots, scale) + corners = self._marker_snapshot_list_to_opencv_points(annotations, scale) cv2.polylines( img=frame, @@ -422,7 +421,7 @@ def _draw_all_corners(self, detected_marker_snapshots, scale, frame, color): def _handle_calibration_result_get_active_response( self, - response: CalibrationResultGetActiveResponse, + response: IntrinsicCalibrationResultGetActiveResponse, responder: str ) -> None: if response: @@ -439,14 +438,26 @@ def _handle_capture_snapshot_response( @staticmethod def _marker_snapshot_list_to_opencv_points( - marker_snapshot_list: list[MarkerSnapshot], - scale: float + marker_snapshot_list: list[Annotation], + scale: float ) -> numpy.ndarray: - corners: list[list[list[(float, float)]]] = [[[ - (corner_point.x_px * scale, corner_point.y_px * scale) - for corner_point in marker.corner_image_points - ]] for marker in marker_snapshot_list] - return_value = numpy.array(corners, dtype=numpy.int32) + if len(marker_snapshot_list) <= 0: + return numpy.asarray([], dtype=numpy.int32) + return_value: list[list[list[(float, float)]]] = list() + current_base_label: str = marker_snapshot_list[0].base_feature_label() + current_shape_points: list[list[(float, float)]] = [[ + marker_snapshot_list[0].x_px * scale, + marker_snapshot_list[0].y_px * scale]] + for marker_snapshot in marker_snapshot_list: + annotation_base_label = marker_snapshot.base_feature_label() + if annotation_base_label != current_base_label: + return_value.append(current_shape_points) + current_base_label = annotation_base_label + current_shape_points.append([ + marker_snapshot.x_px * scale, + marker_snapshot.y_px * scale]) + return_value.append(current_shape_points) + return_value = numpy.asarray(return_value, dtype=numpy.int32) return return_value def _on_build_board_button_click(self, event: wx.CommandEvent) -> None: @@ -541,13 +552,13 @@ def _on_reset_button_click(self, event: wx.CommandEvent) -> None: def 
_process_frame(self, preview: LiveDetectorPreview): # TODO: The Detector should tell us the resolution of the image it operated on. - resolution_str: str = str(StandardResolutions.RES_1280X720) + resolution_str: str = str(ImageUtils.StandardResolutions.RES_1280X720) image_panel = preview.image_panel display_image: numpy.ndarray scale: float | None if self._preview_image_checkbox.checkbox.GetValue() and preview.image is not None: - opencv_image: numpy.ndarray = ImageCoding.base64_to_image(input_base64=preview.image) + opencv_image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=preview.image) display_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=image_panel.GetSize()) @@ -568,11 +579,19 @@ def _process_frame(self, preview: LiveDetectorPreview): if scale is not None: if self._annotate_detected_checkbox.checkbox.GetValue(): - self._draw_all_corners(preview.detector_frame.detected_marker_snapshots, scale, display_image, [255, 191, 127]) + self._draw_all_corners( + annotations=preview.detector_frame.annotations_identified, + scale=scale, + frame=display_image, + color=[255, 191, 127]) if self._annotate_rejected_checkbox.checkbox.GetValue(): - self._draw_all_corners(preview.detector_frame.rejected_marker_snapshots, scale, display_image, [127, 191, 255]) + self._draw_all_corners( + annotations=preview.detector_frame.annotations, + scale=scale, + frame=display_image, + color=[127, 191, 255]) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=display_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() @@ -580,10 +599,10 @@ def _process_frame(self, preview: LiveDetectorPreview): image_panel.paint() def _render_frame(self, detector_poses, target_poses): - pose_solver_frame = PoseSolverFrame( 
+ pose_solver_frame = MixerFrame( detector_poses=detector_poses, target_poses=target_poses, - timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat() ) ### RENDERER ### diff --git a/src/gui/panels/controller_panel.py b/src/gui/panels/controller_panel.py index 9344cb7..716a29a 100644 --- a/src/gui/panels/controller_panel.py +++ b/src/gui/panels/controller_panel.py @@ -4,12 +4,11 @@ LogPanel from src.common import \ DequeueStatusMessagesResponse, \ + StatusMessage, \ StatusMessageSource -from src.common.structures import \ - StatusMessage from src.controller import \ - MCTController, \ - ConnectionReport + Connection, \ + MCTController from typing import Final import wx import wx.grid @@ -28,7 +27,7 @@ class ControllerPanel(BasePanel): _log_panel: LogPanel _controller_status: str # last status reported by MCTController - _connection_reports: list[ConnectionReport] + _connection_reports: list[Connection.Report] _is_updating: bool # Some things should only trigger during explicit user events def __init__( @@ -175,7 +174,7 @@ def update_controller_buttons(self): def update_connection_table_display(self) -> None: # Return if there is no change - connection_reports: list[ConnectionReport] = self._controller.get_connection_reports() + connection_reports: list[Connection.Report] = self._controller.get_connection_reports() if len(connection_reports) == len(self._connection_reports): identical: bool = True for connection_report in connection_reports: diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index bb44b96..c93b2c7 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -8,35 +8,32 @@ ParameterSpinboxFloat, \ ParameterSelector from src.common import \ + Annotation, \ + DetectorFrame, \ ErrorResponse, \ EmptyResponse, \ - ImageCoding, \ + ImageFormat, \ + ImageResolution, \ ImageUtils, \ + KeyValueSimpleAny, \ 
MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ - StandardResolutions, \ StatusMessageSource -from src.common.structures import \ - CaptureFormat, \ - DetectorFrame, \ - ImageResolution, \ - KeyValueSimpleAny, \ - MarkerSnapshot from src.controller import \ MCTController from src.detector.api import \ - CalibrationImageAddRequest, \ - CalibrationImageAddResponse, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetRequest, \ CameraParametersGetResponse, \ CameraParametersSetRequest, \ CameraParametersSetResponse, \ - MarkerParametersGetRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest import cv2 from io import BytesIO import logging @@ -50,9 +47,9 @@ _UPDATE_INTERVAL_MILLISECONDS: Final[int] = 16 _SUPPORTED_RESOLUTIONS: Final[list[ImageResolution]] = [ - StandardResolutions.RES_640X480, - StandardResolutions.RES_1280X720, - StandardResolutions.RES_1920X1080] + ImageUtils.StandardResolutions.RES_640X480, + ImageUtils.StandardResolutions.RES_1280X720, + ImageUtils.StandardResolutions.RES_1920X1080] _SUPPORTED_FPS: Final[list[str]] = [ "15", "30", @@ -62,7 +59,7 @@ "SUBPIX", "CONTOUR", "APRILTAG"] -_CAPTURE_FORMAT: CaptureFormat = ".jpg" +_CAPTURE_FORMAT: ImageFormat = ImageFormat.FORMAT_JPG _CAMERA_PARAMETER_SLOT_COUNT: Final[int] = 100 @@ -75,8 +72,8 @@ class DetectorPanel(BasePanel): _live_preview_request_id: uuid.UUID | None _live_preview_image_base64: str | None - _live_markers_detected: list[MarkerSnapshot] - _live_markers_rejected: list[MarkerSnapshot] + _live_markers_detected: list[Annotation] + _live_markers_rejected: list[Annotation] _live_resolution: ImageResolution | None _detector_selector: ParameterSelector @@ -281,7 +278,7 @@ def __init__( def begin_capture_calibration(self) -> None: selected_detector_label: str = 
self._detector_selector.selector.GetStringSelection() - request_series: MCTRequestSeries = MCTRequestSeries(series=[CalibrationImageAddRequest()]) + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationImageAddRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -302,7 +299,7 @@ def begin_get_detector_parameters(self): request_series: MCTRequestSeries = MCTRequestSeries( series=[ CameraParametersGetRequest(), - MarkerParametersGetRequest()]) + AnnotatorParametersGetRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -326,8 +323,8 @@ def begin_set_detection_parameters(self): key_values: list[KeyValueSimpleAny] = self.populate_key_value_list_from_dynamic_ui( parameter_uis=self._marker_parameter_uis) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - MarkerParametersSetRequest(parameters=key_values), - MarkerParametersGetRequest()]) # sync + AnnotatorParametersSetRequest(parameters=key_values), + AnnotatorParametersGetRequest()]) # sync self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -341,13 +338,13 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationImageAddResponse): + if isinstance(response, IntrinsicCalibrationImageAddResponse): self._handle_add_calibration_image_response(response=response) elif isinstance(response, CameraImageGetResponse): self._handle_capture_snapshot_response(response=response) elif isinstance(response, CameraParametersGetResponse): self._handle_get_capture_parameters_response(response=response) - elif isinstance(response, MarkerParametersGetResponse): + elif isinstance(response, 
AnnotatorParametersGetResponse): self._handle_get_detection_parameters_response(response=response) elif isinstance(response, ErrorResponse): self.handle_error_response(response=response) @@ -356,7 +353,7 @@ def handle_response_series( def _handle_add_calibration_image_response( self, - response: CalibrationImageAddResponse + response: IntrinsicCalibrationImageAddResponse ): self.status_message_source.enqueue_status_message( severity="info", @@ -388,7 +385,7 @@ def _handle_get_capture_parameters_response( # noinspection DuplicatedCode def _handle_get_detection_parameters_response( self, - response: MarkerParametersGetResponse + response: AnnotatorParametersGetResponse ): self._marker_parameter_panel.Freeze() self._marker_parameter_sizer.Clear(True) @@ -403,14 +400,26 @@ def _handle_get_detection_parameters_response( @staticmethod def _marker_snapshot_list_to_opencv_points( - marker_snapshot_list: list[MarkerSnapshot], + marker_snapshot_list: list[Annotation], scale: float ) -> numpy.ndarray: - corners: list[list[list[(float, float)]]] = [[[ - (corner_point.x_px * scale, corner_point.y_px * scale) - for corner_point in marker.corner_image_points - ]] for marker in marker_snapshot_list] - return_value = numpy.array(corners, dtype=numpy.int32) + if len(marker_snapshot_list) <= 0: + return numpy.asarray([], dtype=numpy.int32) + return_value: list[list[list[(float, float)]]] = list() + current_base_label: str | None = None + current_shape_points: list[list[(float, float)]] | None = None + for marker_snapshot in marker_snapshot_list: + annotation_base_label = marker_snapshot.base_feature_label() + if annotation_base_label != current_base_label: + if current_shape_points is not None: + return_value.append(current_shape_points) + current_shape_points = list() + current_base_label = annotation_base_label + current_shape_points.append([ + marker_snapshot.x_px * scale, + marker_snapshot.y_px * scale]) + return_value.append(current_shape_points) + return_value = 
numpy.asarray(return_value, dtype=numpy.int32) return return_value def on_calibration_capture_pressed(self, _event: wx.CommandEvent): @@ -480,8 +489,8 @@ def update_loop(self): detector_frame: DetectorFrame | None = self._controller.get_live_detector_frame( detector_label=detector_label) if detector_frame is not None: - self._live_markers_detected = detector_frame.detected_marker_snapshots - self._live_markers_rejected = detector_frame.rejected_marker_snapshots + self._live_markers_detected = detector_frame.annotations_identified + self._live_markers_rejected = detector_frame.annotations_unidentified self._live_resolution = detector_frame.image_resolution if self._preview_image_checkbox.checkbox.GetValue() and self._live_preview_request_id is None: if self._live_resolution is not None: @@ -521,10 +530,11 @@ def _update_ui_image(self): else: scale: float | None if self._live_preview_image_base64 is not None: - opencv_image: numpy.ndarray = ImageCoding.base64_to_image(input_base64=self._live_preview_image_base64) + opencv_image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=self._live_preview_image_base64) display_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=self._image_panel.GetSize()) + cv2.cvtColor(display_image, cv2.COLOR_RGB2BGR, display_image) scale: float = self._preview_scale_factor.get_value() * display_image.shape[0] / opencv_image.shape[0] else: display_image = ImageUtils.black_image(resolution_px=self._image_panel.GetSize()) @@ -556,7 +566,7 @@ def _update_ui_image(self): color=[127, 191, 255], # orange thickness=2) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=display_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() diff --git 
a/src/gui/panels/extrinsics_panel.py b/src/gui/panels/extrinsics_panel.py new file mode 100644 index 0000000..07ca415 --- /dev/null +++ b/src/gui/panels/extrinsics_panel.py @@ -0,0 +1,688 @@ +from .base_panel import \ + BasePanel +from .feedback import \ + ImagePanel +from .parameters import \ + ParameterSelector, \ + ParameterText +from .specialized import \ + CalibrationImageTable, \ + CalibrationResultTable +from src.common import \ + ErrorResponse, \ + EmptyResponse, \ + ExtrinsicCalibrator, \ + ImageFormat, \ + ImageResolution, \ + ImageUtils, \ + IntrinsicCalibrator, \ + MCTRequestSeries, \ + MCTResponse, \ + MCTResponseSeries, \ + StatusMessageSource +from src.controller import \ + MCTController +from src.detector import \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse +from src.mixer import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ + MixerUpdateIntrinsicParametersRequest +import datetime +from io import BytesIO +import logging +import numpy +from typing import Optional +import uuid +import wx +import wx.grid + + +logger = logging.getLogger(__name__) +_PREVIEW_CAPTURE_FORMAT: ImageFormat = ImageFormat.FORMAT_JPG + + +class ExtrinsicsPanel(BasePanel): + + _controller: MCTController + + _mixer_selector: ParameterSelector + 
_reload_button: wx.Button + _preview_toggle_button: wx.ToggleButton + _capture_button: wx.Button + _image_table: CalibrationImageTable + _image_label_textbox: ParameterText + _image_state_selector: ParameterSelector + _image_update_button: wx.Button + _calibrate_button: wx.Button + _calibrate_status_textbox: wx.TextCtrl + _result_table: CalibrationResultTable + _result_display_textbox: wx.TextCtrl + _result_label_textbox: ParameterText + _result_state_selector: ParameterSelector + _result_update_button: wx.Button + _image_panel: ImagePanel + + _control_blocking_request_ids: set[uuid.UUID] + _is_updating: bool # Some things should only trigger during explicit user events + _preview_request_ids_by_detector_label: dict[str, uuid.UUID] + _preview_images_by_detector_label: dict[str, numpy.ndarray] + _extrinsic_image: numpy.ndarray | None + _current_capture_timestamp: datetime.datetime | None # None indicates no capture in progress + _calibration_in_progress: bool + _image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + _result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] + + def __init__( + self, + parent: wx.Window, + controller: MCTController, + status_message_source: StatusMessageSource, + name: str = "IntrinsicsPanel" + ): + super().__init__( + parent=parent, + status_message_source=status_message_source, + name=name) + self._controller = controller + + self._control_blocking_request_ids = set() + self._is_updating = False + self._preview_request_ids_by_detector_label = dict() + self._preview_images_by_detector_label = dict() + self._extrinsic_image = None + self._current_capture_timestamp = None + self._calibration_in_progress = False + self._image_metadata_list = list() + self._result_metadata_list = list() + + horizontal_split_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.HORIZONTAL) + + control_border_panel: wx.Panel = wx.Panel(parent=self) + control_border_box: wx.StaticBoxSizer = wx.StaticBoxSizer( + orient=wx.VERTICAL, + 
parent=control_border_panel) + control_panel: wx.ScrolledWindow = wx.ScrolledWindow( + parent=control_border_panel) + control_panel.SetScrollRate( + xstep=1, + ystep=1) + control_panel.ShowScrollbars( + horz=wx.SHOW_SB_NEVER, + vert=wx.SHOW_SB_ALWAYS) + + control_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.VERTICAL) + + self._mixer_selector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Mixer", + selectable_values=list()) + + self._reload_button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Reload Metadata") + + self._preview_toggle_button = wx.ToggleButton( + parent=control_panel, + label="Preview") + control_sizer.Add( + window=self._preview_toggle_button, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._capture_button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Capture") + + self._calibrate_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Calibrate") + + self._calibrate_status_textbox = wx.TextCtrl( + parent=control_panel, + style=wx.TE_READONLY | wx.TE_RICH) + self._calibrate_status_textbox.SetEditable(False) + self._calibrate_status_textbox.SetBackgroundColour(colour=wx.Colour(red=249, green=249, blue=249, alpha=255)) + control_sizer.Add( + window=self._calibrate_status_textbox, + flags=wx.SizerFlags(0).Expand()) + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + self._image_table = CalibrationImageTable(parent=control_panel) + self._image_table.SetMaxSize((-1, self._image_table.GetSize().GetHeight())) + control_sizer.Add( + window=self._image_table, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._image_label_textbox: ParameterText = self.add_control_text_input( + parent=control_panel, + sizer=control_sizer, + label="Image 
Label") + + self._image_state_selector: ParameterSelector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Image State", + selectable_values=[state.name for state in IntrinsicCalibrator.ImageState]) + + self._image_update_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Update Image") + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + self._result_table = CalibrationResultTable(parent=control_panel) + control_sizer.Add( + window=self._result_table, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._result_display_textbox = wx.TextCtrl( + parent=control_panel, + style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH) + self._result_display_textbox.SetEditable(False) + self._result_display_textbox.SetBackgroundColour(colour=wx.Colour(red=249, green=249, blue=249, alpha=255)) + control_sizer.Add( + window=self._result_display_textbox, + flags=wx.SizerFlags(1).Align(wx.EXPAND)) + + self._result_label_textbox: ParameterText = self.add_control_text_input( + parent=control_panel, + sizer=control_sizer, + label="Result Label") + + self._result_state_selector: ParameterSelector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Result State", + selectable_values=[state.name for state in IntrinsicCalibrator.ResultState]) + + self._result_update_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Update Result") + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + control_spacer_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.HORIZONTAL) + control_sizer.Add( + sizer=control_spacer_sizer, + flags=wx.SizerFlags(1).Expand()) + + control_panel.SetSizerAndFit(sizer=control_sizer) + control_border_box.Add( + window=control_panel, + flags=wx.SizerFlags(1).Expand()) + 
control_border_panel.SetSizer(sizer=control_border_box) + horizontal_split_sizer.Add( + window=control_border_panel, + flags=wx.SizerFlags(50).Expand()) + + self._image_panel = ImagePanel(parent=self) + self._image_panel.SetBackgroundColour(colour=wx.BLACK) + horizontal_split_sizer.Add( + window=self._image_panel, + flags=wx.SizerFlags(50).Expand()) + + self.SetSizerAndFit(sizer=horizontal_split_sizer) + + self._mixer_selector.selector.Bind( + event=wx.EVT_CHOICE, + handler=self._on_mixer_reload) + self._reload_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_mixer_reload) + self._preview_toggle_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_preview_toggled) + self._capture_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_capture_pressed) + self._calibrate_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_calibrate_pressed) + self._image_table.table.Bind( + event=wx.grid.EVT_GRID_SELECT_CELL, + handler=self._on_image_metadata_selected) + self._image_update_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_image_update_pressed) + self._result_table.table.Bind( + event=wx.grid.EVT_GRID_SELECT_CELL, + handler=self._on_result_metadata_selected) + self._result_update_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_result_update_pressed) + + def handle_error_response( + self, + response: ErrorResponse + ): + super().handle_error_response(response=response) + if self._calibration_in_progress: + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=127, green=0, blue=0, alpha=255)) + self._calibrate_status_textbox.SetValue(f"Error: {response.message}") + + def handle_response_series( + self, + response_series: MCTResponseSeries, + task_description: Optional[str] = None, + expected_response_count: Optional[int] = None + ) -> None: + response: MCTResponse + for response in response_series.series: + if isinstance(response, CameraImageGetResponse): + self._handle_response_camera_image_get(response=response, 
detector_label=response_series.responder) + elif isinstance(response, ExtrinsicCalibrationCalculateResponse): + self._handle_response_extrinsic_calibration_calculate(response=response) + elif isinstance(response, ExtrinsicCalibrationImageAddResponse): + self._handle_response_extrinsic_calibration_image_add(response=response) + elif isinstance(response, ExtrinsicCalibrationImageGetResponse): + self._handle_response_extrinsic_calibration_image_get(response=response) + elif isinstance(response, ExtrinsicCalibrationResultGetResponse): + self._handle_response_extrinsic_calibration_result_get(response=response) + elif isinstance(response, ExtrinsicCalibrationImageMetadataListResponse): + self._handle_response_extrinsic_calibration_image_metadata_list(response=response) + elif isinstance(response, ExtrinsicCalibrationResultMetadataListResponse): + self._handle_response_extrinsic_calibration_result_metadata_list(response=response) + elif isinstance(response, IntrinsicCalibrationResultGetActiveResponse): + self._handle_response_intrinsic_calibration_result_get_active( + response=response, + detector_label=response_series.responder) + elif isinstance(response, ErrorResponse): + self.handle_error_response(response=response) + elif not isinstance(response, EmptyResponse): + self.handle_unknown_response(response=response) + + def on_page_select(self) -> None: + super().on_page_select() + available_mixer_labels: list[str] = self._controller.get_active_mixer_labels() + self._mixer_selector.set_options(option_list=available_mixer_labels) + self._update_ui_controls() + + def update_loop(self) -> None: + super().update_loop() + self._is_updating = True + + response_series: MCTResponseSeries | None + responded_request_ids: list[tuple[uuid.UUID, MCTResponseSeries]] = list() + for request_id in self._control_blocking_request_ids: + _, response_series = self._controller.response_series_pop(request_series_id=request_id) + if response_series is not None: + 
responded_request_ids.append((request_id, response_series)) + if len(responded_request_ids) > 0: + # Clean up the request id list first + for request_id, response_series in responded_request_ids: + self._control_blocking_request_ids.remove(request_id) + # THEN handle the responses. This order of ops is assumed in calibration handling. + for request_id, response_series in responded_request_ids: + self.handle_response_series(response_series) + self._update_ui_controls() + + if self._preview_toggle_button.GetValue(): + detector_labels_with_responses: set[str] = set() + for detector_label, request_id in self._preview_request_ids_by_detector_label.items(): + _, response_series = self._controller.response_series_pop(request_series_id=request_id) + if response_series is not None and \ + len(response_series.series) > 0 and \ + isinstance(response_series.series[0], CameraImageGetResponse): + response: CameraImageGetResponse = response_series.series[0] + self._preview_images_by_detector_label[detector_label] = \ + ImageUtils.base64_to_image(response.image_base64) + detector_labels_with_responses.add(detector_label) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + if detector_label in detector_labels_with_responses or \ + detector_label not in self._preview_request_ids_by_detector_label: + request_series: MCTRequestSeries = MCTRequestSeries( + series=[CameraImageGetRequest( + format=_PREVIEW_CAPTURE_FORMAT, + requested_resolution=ImageResolution(x_px=800, y_px=480))]) # TODO: Parameterize + preview_request_id = self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series) + self._preview_request_ids_by_detector_label[detector_label] = preview_request_id + + self._update_ui_image() + + self._is_updating = False + + def _handle_response_camera_image_get( + self, + response: CameraImageGetResponse, + detector_label: str + ) -> None: + # Note: This is for the 
control-blocking requests ONLY! + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageAddRequest( + image_base64=response.image_base64, + detector_label=detector_label, + timestamp_utc_iso8601=self._current_capture_timestamp.isoformat()), + ExtrinsicCalibrationImageMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + + def _handle_response_intrinsic_calibration_result_get_active( + self, + response: IntrinsicCalibrationResultGetActiveResponse, + detector_label: str + ) -> None: + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + MixerUpdateIntrinsicParametersRequest( + detector_label=detector_label, + intrinsic_parameters=response.intrinsic_calibration.calibrated_values)]) + if len(self._control_blocking_request_ids) <= 0: # This is the last intrinsic - we are ready to calculate + request_series.series.append(ExtrinsicCalibrationCalculateRequest()) + request_series.series.append(ExtrinsicCalibrationResultMetadataListRequest()) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + + def _handle_response_extrinsic_calibration_calculate( + self, + response: ExtrinsicCalibrationCalculateResponse + ) -> None: + if not self._calibration_in_progress: + self.status_message_source.enqueue_status_message( + severity="warning", + message=f"Received CalibrateResponse while no calibration is in progress.") + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=0, green=0, blue=127, alpha=255)) + self._calibrate_status_textbox.SetValue(f"Calibration {response.result_identifier} complete.") + 
self._result_display_textbox.SetValue(response.extrinsic_calibration.model_dump_json(indent=4)) + self._calibration_in_progress = False + + # noinspection PyUnusedLocal + def _handle_response_extrinsic_calibration_image_add( + self, + response: ExtrinsicCalibrationImageAddResponse + ) -> None: + if len(self._control_blocking_request_ids) <= 0: + self._current_capture_timestamp = None + + def _handle_response_extrinsic_calibration_image_get( + self, + response: ExtrinsicCalibrationImageGetResponse + ) -> None: + self._extrinsic_image = ImageUtils.base64_to_image(response.image_base64) + + def _handle_response_extrinsic_calibration_image_metadata_list( + self, + response: ExtrinsicCalibrationImageMetadataListResponse + ) -> None: + self._image_metadata_list = response.metadata_list + self._image_table.update_contents(row_contents=self._image_metadata_list) + + def _handle_response_extrinsic_calibration_result_get( + self, + response: ExtrinsicCalibrationResultGetResponse + ) -> None: + self._result_display_textbox.SetValue(str(response.extrinsic_calibration.model_dump_json(indent=4))) + + def _handle_response_extrinsic_calibration_result_metadata_list( + self, + response: ExtrinsicCalibrationResultMetadataListResponse + ) -> None: + self._result_metadata_list = response.metadata_list + self._result_table.update_contents(row_contents=self._result_metadata_list) + + def _on_mixer_reload(self, _event: wx.CommandEvent) -> None: + self._image_metadata_list = list() + self._result_metadata_list = list() + self._calibrate_status_textbox.SetValue(str()) + self._result_display_textbox.SetValue(str()) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageMetadataListRequest(), + ExtrinsicCalibrationResultMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) 
+ self._update_ui_controls() + + def _on_preview_toggled(self, _event: wx.CommandEvent) -> None: + if self._is_updating: + return + self._image_table.set_selected_row_index(None) + self._update_ui_controls() + + def _on_capture_pressed(self, _event: wx.CommandEvent) -> None: + self._current_capture_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + CameraImageGetRequest(format=ImageFormat.FORMAT_PNG)]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_calibrate_pressed(self, _event: wx.CommandEvent) -> None: + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=0, green=0, blue=0, alpha=255)) + self._calibrate_status_textbox.SetValue("Calibrating...") + self._result_display_textbox.SetValue(str()) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationResultGetActiveRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series)) + self._calibration_in_progress = True + self._update_ui_controls() + + def _on_image_metadata_selected(self, _event: wx.grid.GridEvent) -> None: + if self._is_updating: + return + self._preview_toggle_button.SetValue(False) + image_index: int = self._image_table.get_selected_row_index() + image_identifier: str | None = self._image_metadata_list[image_index].identifier + if image_identifier is not None: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageGetRequest(image_identifier=image_identifier)]) + mixer_label: str = 
self._mixer_selector.selector.GetStringSelection() + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_image_update_pressed(self, _event: wx.CommandEvent) -> None: + self._calibrate_status_textbox.SetValue(str()) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + image_index: int = self._image_table.get_selected_row_index() + image_identifier: str = self._image_metadata_list[image_index].identifier + image_state: IntrinsicCalibrator.ImageState = \ + ExtrinsicCalibrator.ImageState[self._image_state_selector.selector.GetStringSelection()] + image_label: str = self._image_label_textbox.textbox.GetValue() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageMetadataUpdateRequest( + image_identifier=image_identifier, + image_state=image_state, + image_label=image_label), + ExtrinsicCalibrationDeleteStagedRequest(), + ExtrinsicCalibrationImageMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_result_metadata_selected(self, _event: wx.grid.GridEvent) -> None: + if self._is_updating: + return + self._preview_toggle_button.SetValue(False) + self._result_display_textbox.SetValue(str()) + result_index: int = self._result_table.get_selected_row_index() + result_identifier: str | None = self._result_metadata_list[result_index].identifier + if result_identifier is not None: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationResultGetRequest(result_identifier=result_identifier)]) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + 
self._update_ui_controls() + + def _on_result_update_pressed(self, _event: wx.CommandEvent) -> None: + self._result_display_textbox.SetValue(str()) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + result_index: int = self._result_table.get_selected_row_index() + result_identifier: str = self._result_metadata_list[result_index].identifier + result_state: ExtrinsicCalibrator.ResultState = \ + ExtrinsicCalibrator.ResultState[self._result_state_selector.selector.GetStringSelection()] + result_label: str = self._result_label_textbox.textbox.GetValue() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationResultMetadataUpdateRequest( + result_identifier=result_identifier, + result_state=result_state, + result_label=result_label), + ExtrinsicCalibrationDeleteStagedRequest(), + ExtrinsicCalibrationResultMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _update_ui_controls(self) -> None: + self._mixer_selector.Enable(False) + self._reload_button.Enable(False) + self._preview_toggle_button.Enable(False) + self._capture_button.Enable(False) + self._calibrate_button.Enable(False) + self._image_table.Enable(False) + self._image_label_textbox.Enable(False) + self._image_label_textbox.textbox.SetValue(str()) + self._image_state_selector.Enable(False) + self._image_state_selector.selector.SetStringSelection(str()) + self._image_update_button.Enable(False) + self._calibrate_status_textbox.Enable(False) + self._result_table.Enable(False) + self._result_display_textbox.Enable(False) + self._result_label_textbox.Enable(False) + self._result_label_textbox.textbox.SetValue(str()) + self._result_state_selector.Enable(False) + self._result_state_selector.selector.SetStringSelection(str()) + self._result_update_button.Enable(False) + if len(self._control_blocking_request_ids) > 0: 
+ return # We're waiting for something + self._mixer_selector.Enable(True) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + if len(mixer_label) <= 0: + self._preview_toggle_button.SetValue(False) + return + self._reload_button.Enable(True) + self._preview_toggle_button.Enable(True) + self._capture_button.Enable(True) + # == NO RETURN GUARDS AFTER THIS POINT == + if len(self._image_metadata_list) > 0: + self._image_table.Enable(True) + image_index: int | None = self._image_table.get_selected_row_index() + if image_index is not None: + if image_index >= len(self._image_metadata_list): + self.status_message_source.enqueue_status_message( + severity="warning", + message=f"Selected image index {image_index} is out of bounds. Setting to None.") + self._image_table.set_selected_row_index(None) + else: + image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index] + self._image_label_textbox.Enable(True) + self._image_label_textbox.textbox.SetValue(image_metadata.label) + self._image_state_selector.Enable(True) + self._image_state_selector.selector.SetStringSelection(image_metadata.state.name) + self._image_update_button.Enable(True) + calibration_image_count: int = 0 + for image_metadata in self._image_metadata_list: + if image_metadata.state == IntrinsicCalibrator.ImageState.SELECT: + calibration_image_count += 1 + if calibration_image_count > 0: + self._calibrate_button.Enable(True) + self._calibrate_status_textbox.Enable(True) + if len(self._result_metadata_list) > 0: + self._result_table.Enable(True) + result_index: int | None = self._result_table.get_selected_row_index() + if result_index is not None: + if result_index >= len(self._result_metadata_list): + self.status_message_source.enqueue_status_message( + severity="warning", + message=f"Selected result index {result_index} is out of bounds. 
Setting to None.") + self._result_table.set_selected_row_index(None) + else: + result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index] + self._result_display_textbox.Enable(True) + self._result_label_textbox.Enable(True) + self._result_label_textbox.textbox.SetValue(result_metadata.label) + self._result_state_selector.Enable(True) + self._result_state_selector.selector.SetStringSelection(result_metadata.state.name) + self._result_update_button.Enable(True) + self.Layout() + self.Refresh() + self.Update() + + def _update_ui_image(self): + display_image: numpy.ndarray = ImageUtils.black_image(resolution_px=self._image_panel.GetSize()) + available_size_px: int = (display_image.shape[1], display_image.shape[0]) + if self._preview_toggle_button.GetValue(): + detector_labels: list[str] = self._controller.get_active_detector_labels() + image_dimensions: tuple[int, int] + image_positions: list[tuple[int, int]] + image_dimensions, image_positions = ImageUtils.partition_rect( + available_size_px=available_size_px, + partition_count=len(detector_labels)) + for detector_index, detector_label in enumerate(detector_labels): + if detector_label in self._preview_images_by_detector_label: + detector_image: numpy.ndarray = self._preview_images_by_detector_label[detector_label] + detector_image = ImageUtils.image_resize_to_fit( + opencv_image=detector_image, + available_size=image_dimensions) + offset_y_px: int = image_positions[detector_index][1] + (image_dimensions[1] - detector_image.shape[0]) // 2 + offset_x_px: int = image_positions[detector_index][0] + (image_dimensions[0] - detector_image.shape[1]) // 2 + display_image[ + offset_y_px:offset_y_px + detector_image.shape[0], + offset_x_px:offset_x_px + detector_image.shape[1] + ] = detector_image + elif self._extrinsic_image is not None: + extrinsic_image: numpy.ndarray = ImageUtils.image_resize_to_fit( + opencv_image=self._extrinsic_image, + available_size=available_size_px) + offset_y_px: 
int = (display_image.shape[0] - extrinsic_image.shape[0]) // 2 + offset_x_px: int = (display_image.shape[1] - extrinsic_image.shape[1]) // 2 + display_image[ + offset_y_px:offset_y_px + extrinsic_image.shape[0], + offset_x_px:offset_x_px + extrinsic_image.shape[1], + ] = extrinsic_image + + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") + image_buffer_io: BytesIO = BytesIO(image_buffer) + wx_image: wx.Image = wx.Image(image_buffer_io) + wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() + self._image_panel.set_bitmap(wx_bitmap) + self._image_panel.paint() diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/intrinsics_panel.py similarity index 86% rename from src/gui/panels/calibrator_panel.py rename to src/gui/panels/intrinsics_panel.py index bb6b928..f6c0be9 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/intrinsics_panel.py @@ -11,37 +11,31 @@ from src.common import \ ErrorResponse, \ EmptyResponse, \ - ImageCoding, \ + ImageResolution, \ ImageUtils, \ + IntrinsicCalibrator, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ StatusMessageSource -from src.common.structures import \ - ImageResolution from src.controller import \ MCTController -from src.detector.api import \ - CalibrationCalculateRequest, \ - CalibrationCalculateResponse, \ - CalibrationDeleteStagedRequest, \ - CalibrationImageGetRequest, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListRequest, \ - CalibrationImageMetadataListResponse, \ - CalibrationImageMetadataUpdateRequest, \ - CalibrationResolutionListRequest, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetRequest, \ - CalibrationResultGetResponse, \ - CalibrationResultMetadataListRequest, \ - CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest -from src.detector.structures import \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationResultMetadata, \ - CalibrationResultState 
+from src.detector import \ + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + IntrinsicCalibrationResultMetadataListResponse, \ + IntrinsicCalibrationResultMetadataUpdateRequest from io import BytesIO import logging from typing import Optional @@ -53,7 +47,7 @@ logger = logging.getLogger(__name__) -class CalibratorPanel(BasePanel): +class IntrinsicsPanel(BasePanel): _controller: MCTController @@ -78,15 +72,15 @@ class CalibratorPanel(BasePanel): _calibration_in_progress: bool _force_last_result_selected: bool _detector_resolutions: list[ImageResolution] - _image_metadata_list: list[CalibrationImageMetadata] - _result_metadata_list: list[CalibrationResultMetadata] + _image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + _result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] def __init__( self, parent: wx.Window, controller: MCTController, status_message_source: StatusMessageSource, - name: str = "CalibratorPanel" + name: str = "IntrinsicsPanel" ): super().__init__( parent=parent, @@ -156,7 +150,7 @@ def __init__( parent=control_panel, sizer=control_sizer, label="Image State", - selectable_values=[state.name for state in CalibrationImageState]) + selectable_values=[state.name for state in IntrinsicCalibrator.ImageState]) self._image_update_button: wx.Button = self.add_control_button( parent=control_panel, @@ -209,7 +203,7 @@ def __init__( parent=control_panel, sizer=control_sizer, label="Result State", - 
selectable_values=[state.name for state in CalibrationResultState]) + selectable_values=[state.name for state in IntrinsicCalibrator.ResultState]) self._result_update_button: wx.Button = self.add_control_button( parent=control_panel, @@ -284,17 +278,17 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationCalculateResponse): + if isinstance(response, IntrinsicCalibrationCalculateResponse): self._handle_response_calibrate(response=response) - elif isinstance(response, CalibrationImageGetResponse): + elif isinstance(response, IntrinsicCalibrationImageGetResponse): self._handle_response_get_calibration_image(response=response) - elif isinstance(response, CalibrationResultGetResponse): + elif isinstance(response, IntrinsicCalibrationResultGetResponse): self._handle_response_get_calibration_result(response=response) - elif isinstance(response, CalibrationResolutionListResponse): + elif isinstance(response, IntrinsicCalibrationResolutionListResponse): self._handle_response_list_calibration_detector_resolutions(response=response) - elif isinstance(response, CalibrationImageMetadataListResponse): + elif isinstance(response, IntrinsicCalibrationImageMetadataListResponse): self._handle_response_list_calibration_image_metadata(response=response) - elif isinstance(response, CalibrationResultMetadataListResponse): + elif isinstance(response, IntrinsicCalibrationResultMetadataListResponse): self._handle_response_list_calibration_result_metadata(response=response) elif isinstance(response, ErrorResponse): self.handle_error_response(response=response) @@ -326,7 +320,7 @@ def update_loop(self) -> None: def _handle_response_calibrate( self, - response: CalibrationCalculateResponse + response: IntrinsicCalibrationCalculateResponse ) -> None: if not self._calibration_in_progress: self.status_message_source.enqueue_status_message( @@ -336,19 +330,19 @@ def _handle_response_calibrate( 
self._calibrate_status_textbox.SetValue( f"Calibration {response.result_identifier} complete - values: " f"{str(response.intrinsic_calibration.calibrated_values.as_array())}") - self._result_display_textbox.SetValue(response.intrinsic_calibration.json(indent=4)) + self._result_display_textbox.SetValue(response.intrinsic_calibration.model_dump_json(indent=4)) self._calibration_in_progress = False self._force_last_result_selected = True def _handle_response_get_calibration_image( self, - response: CalibrationImageGetResponse + response: IntrinsicCalibrationImageGetResponse ) -> None: - opencv_image = ImageCoding.base64_to_image(input_base64=response.image_base64) + opencv_image = ImageUtils.base64_to_image(input_base64=response.image_base64) opencv_image = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=self._image_panel.GetSize()) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=opencv_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=opencv_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() @@ -357,13 +351,13 @@ def _handle_response_get_calibration_image( def _handle_response_get_calibration_result( self, - response: CalibrationResultGetResponse + response: IntrinsicCalibrationResultGetResponse ) -> None: - self._result_display_textbox.SetValue(str(response.intrinsic_calibration.json(indent=4))) + self._result_display_textbox.SetValue(str(response.intrinsic_calibration.model_dump_json(indent=4))) def _handle_response_list_calibration_detector_resolutions( self, - response: CalibrationResolutionListResponse + response: IntrinsicCalibrationResolutionListResponse ) -> None: self._detector_resolutions = sorted(response.resolutions) self._detector_resolution_selector.set_options([str(res) for res in self._detector_resolutions]) @@ -371,14 +365,14 @@ def 
_handle_response_list_calibration_detector_resolutions( def _handle_response_list_calibration_image_metadata( self, - response: CalibrationImageMetadataListResponse + response: IntrinsicCalibrationImageMetadataListResponse ) -> None: self._image_metadata_list = response.metadata_list self._image_table.update_contents(row_contents=self._image_metadata_list) def _handle_response_list_calibration_result_metadata( self, - response: CalibrationResultMetadataListResponse + response: IntrinsicCalibrationResultMetadataListResponse ) -> None: self._result_metadata_list = response.metadata_list self._result_table.update_contents(row_contents=self._result_metadata_list) @@ -394,9 +388,9 @@ def _on_calibrate_pressed(self, _event: wx.CommandEvent) -> None: selected_image_resolution: ImageResolution = \ ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationCalculateRequest( + IntrinsicCalibrationCalculateRequest( image_resolution=selected_image_resolution), - CalibrationResultMetadataListRequest( + IntrinsicCalibrationResultMetadataListRequest( image_resolution=selected_image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, @@ -411,7 +405,7 @@ def _on_detector_selected(self, _event: wx.CommandEvent) -> None: self._calibrate_status_textbox.SetValue(str()) self._result_display_textbox.SetValue(str()) detector_label: str = self._detector_selector.selector.GetStringSelection() - request_series: MCTRequestSeries = MCTRequestSeries(series=[CalibrationResolutionListRequest()]) + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationResolutionListRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, request_series=request_series) @@ -426,9 +420,9 @@ def _on_detector_load_pressed(self, _event: wx.CommandEvent) 
-> None: selected_image_resolution: ImageResolution = \ ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageMetadataListRequest( + IntrinsicCalibrationImageMetadataListRequest( image_resolution=selected_image_resolution), - CalibrationResultMetadataListRequest( + IntrinsicCalibrationResultMetadataListRequest( image_resolution=selected_image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, @@ -457,7 +451,7 @@ def _on_image_metadata_selected(self, _event: wx.grid.GridEvent) -> None: image_identifier: str | None = self._image_metadata_list[image_index].identifier if image_identifier is not None: request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageGetRequest(image_identifier=image_identifier)]) + IntrinsicCalibrationImageGetRequest(image_identifier=image_identifier)]) detector_label: str = self._detector_selector.selector.GetStringSelection() self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -471,16 +465,16 @@ def _on_image_update_pressed(self, _event: wx.CommandEvent) -> None: ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) image_index: int = self._image_table.get_selected_row_index() image_identifier: str = self._image_metadata_list[image_index].identifier - image_state: CalibrationImageState = \ - CalibrationImageState[self._image_state_selector.selector.GetStringSelection()] + image_state: IntrinsicCalibrator.ImageState = \ + IntrinsicCalibrator.ImageState[self._image_state_selector.selector.GetStringSelection()] image_label: str = self._image_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageMetadataUpdateRequest( + IntrinsicCalibrationImageMetadataUpdateRequest( 
image_identifier=image_identifier, image_state=image_state, image_label=image_label), - CalibrationDeleteStagedRequest(), - CalibrationImageMetadataListRequest( + IntrinsicCalibrationDeleteStagedRequest(), + IntrinsicCalibrationImageMetadataListRequest( image_resolution=image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -495,7 +489,7 @@ def _on_result_metadata_selected(self, _event: wx.grid.GridEvent) -> None: result_identifier: str | None = self._result_metadata_list[result_index].identifier if result_identifier is not None: request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationResultGetRequest(result_identifier=result_identifier)]) + IntrinsicCalibrationResultGetRequest(result_identifier=result_identifier)]) detector_label: str = self._detector_selector.selector.GetStringSelection() self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -509,16 +503,16 @@ def _on_result_update_pressed(self, _event: wx.CommandEvent) -> None: ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) result_index: int = self._result_table.get_selected_row_index() result_identifier: str = self._result_metadata_list[result_index].identifier - result_state: CalibrationResultState = \ - CalibrationResultState[self._result_state_selector.selector.GetStringSelection()] + result_state: IntrinsicCalibrator.ResultState = \ + IntrinsicCalibrator.ResultState[self._result_state_selector.selector.GetStringSelection()] result_label: str = self._result_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationResultMetadataUpdateRequest( + IntrinsicCalibrationResultMetadataUpdateRequest( result_identifier=result_identifier, result_state=result_state, result_label=result_label), - CalibrationDeleteStagedRequest(), - CalibrationResultMetadataListRequest( + 
IntrinsicCalibrationDeleteStagedRequest(), + IntrinsicCalibrationResultMetadataListRequest( image_resolution=image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -565,7 +559,7 @@ def _update_ui_controls(self) -> None: message=f"Selected image index {image_index} is out of bounds. Setting to None.") self._image_table.set_selected_row_index(None) else: - image_metadata: CalibrationImageMetadata = self._image_metadata_list[image_index] + image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index] self._image_label_textbox.Enable(True) self._image_label_textbox.textbox.SetValue(image_metadata.label) self._image_state_selector.Enable(True) @@ -573,7 +567,7 @@ def _update_ui_controls(self) -> None: self._image_update_button.Enable(True) calibration_image_count: int = 0 for image_metadata in self._image_metadata_list: - if image_metadata.state == CalibrationImageState.SELECT: + if image_metadata.state == IntrinsicCalibrator.ImageState.SELECT: calibration_image_count += 1 if calibration_image_count > 0: self._calibrate_button.Enable(True) @@ -593,7 +587,7 @@ def _update_ui_controls(self) -> None: message=f"Selected result index {result_index} is out of bounds. 
Setting to None.") self._result_table.set_selected_row_index(None) else: - result_metadata: CalibrationResultMetadata = self._result_metadata_list[result_index] + result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index] self._result_display_textbox.Enable(True) self._result_label_textbox.Enable(True) self._result_label_textbox.textbox.SetValue(result_metadata.label) diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index e07b06e..73ecca2 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -7,16 +7,16 @@ TrackingTable, \ TrackingTableRow from src.common import \ + DetectorFrame, \ ErrorResponse, \ EmptyResponse, \ + Matrix4x4, \ MCTResponse, \ MCTResponseSeries, \ - StatusMessageSource -from src.common.structures import \ - DetectorFrame, \ - Matrix4x4, \ + StatusMessageSource, \ + SeverityLabel, \ Pose, \ - PoseSolverFrame + MixerFrame from src.controller import \ MCTController import datetime @@ -43,7 +43,7 @@ class PoseSolverPanel(BasePanel): _control_blocking_request_id: uuid.UUID | None _is_updating: bool _latest_detector_frames: dict[str, DetectorFrame] # last frame for each detector - _latest_pose_solver_frames: dict[str, PoseSolverFrame] + _latest_pose_solver_frames: dict[str, MixerFrame] _target_id_to_label: dict[str, str] _tracked_target_poses: list[Pose] @@ -163,7 +163,7 @@ def handle_response_series( def on_page_select(self) -> None: super().on_page_select() selected_pose_solver_label: str = self._pose_solver_selector.selector.GetStringSelection() - available_pose_solver_labels: list[str] = self._controller.get_active_pose_solver_labels() + available_pose_solver_labels: list[str] = self._controller.get_active_mixer_labels() self._pose_solver_selector.set_options(option_list=available_pose_solver_labels) if selected_pose_solver_label in available_pose_solver_labels: 
self._pose_solver_selector.selector.SetStringSelection(selected_pose_solver_label) @@ -180,11 +180,11 @@ def on_tracking_row_selected(self, _event: wx.grid.GridEvent) -> None: selected_index: int | None = self._tracking_table.get_selected_row_index() if selected_index is not None: if 0 <= selected_index < len(self._tracked_target_poses): - display_text: str = self._tracked_target_poses[selected_index].json(indent=4) + display_text: str = self._tracked_target_poses[selected_index].model_dump_json(indent=4) self._tracking_display_textbox.SetValue(display_text) else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Target index {selected_index} is out of bounds. Selection will be set to None.") self._tracking_table.set_selected_row_index(None) self._update_ui_controls() @@ -202,23 +202,23 @@ def update_loop(self) -> None: for detector_label in detector_labels: retrieved_detector_frame: DetectorFrame = self._controller.get_live_detector_frame( detector_label=detector_label) - retrieved_detector_frame_timestamp: datetime.datetime = retrieved_detector_frame.timestamp_utc() + retrieved_detector_frame_timestamp: datetime.datetime = retrieved_detector_frame.timestamp_utc if detector_label in self._latest_detector_frames: latest_detector_frame: DetectorFrame = self._latest_detector_frames[detector_label] - latest_detector_frame_timestamp: datetime.datetime = latest_detector_frame.timestamp_utc() + latest_detector_frame_timestamp: datetime.datetime = latest_detector_frame.timestamp_utc if retrieved_detector_frame_timestamp > latest_detector_frame_timestamp: self._latest_detector_frames[detector_label] = retrieved_detector_frame else: self._latest_detector_frames[detector_label] = retrieved_detector_frame new_poses_available: bool = False - pose_solver_labels: list[str] = self._controller.get_active_pose_solver_labels() + pose_solver_labels: list[str] = self._controller.get_active_mixer_labels() for 
pose_solver_label in pose_solver_labels: - retrieved_pose_solver_frame: PoseSolverFrame = self._controller.get_live_pose_solver_frame( + retrieved_pose_solver_frame: MixerFrame = self._controller.get_live_pose_solver_frame( pose_solver_label=pose_solver_label) retrieved_pose_solver_frame_timestamp: datetime.datetime = retrieved_pose_solver_frame.timestamp_utc() if pose_solver_label in self._latest_pose_solver_frames: - latest_pose_solver_frame: PoseSolverFrame = self._latest_pose_solver_frames[pose_solver_label] + latest_pose_solver_frame: MixerFrame = self._latest_pose_solver_frames[pose_solver_label] latest_pose_solver_frame_timestamp: datetime.datetime = latest_pose_solver_frame.timestamp_utc() if retrieved_pose_solver_frame_timestamp > latest_pose_solver_frame_timestamp: self._latest_pose_solver_frames[pose_solver_label] = retrieved_pose_solver_frame @@ -296,7 +296,7 @@ def _update_ui_controls(self) -> None: if tracked_target_index is not None: if tracked_target_index >= len(self._tracked_target_poses): self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Selected tracked target index {tracked_target_index} is out of bounds. 
" "Setting to None.") self._tracking_table.set_selected_row_index(None) diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index a4449a3..3e250e8 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector.structures import CalibrationImageMetadata +from src.common import IntrinsicCalibrator from typing import Final import wx @@ -12,7 +12,7 @@ _COL_LABELS: Final[list[str]] = ["Identifier", "Label", "Timestamp", "Status"] -class CalibrationImageTable(RowSelectionTable[CalibrationImageMetadata]): +class CalibrationImageTable(RowSelectionTable[IntrinsicCalibrator.ImageMetadata]): def __init__( self, parent: wx.Window, @@ -26,7 +26,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: CalibrationImageMetadata + row_content: IntrinsicCalibrator.ImageMetadata ): self.table.SetCellValue( row=row_index, @@ -39,7 +39,7 @@ def _set_row_contents( self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, - s=str(row_content.timestamp_utc)) + s=str(row_content.timestamp_utc_iso8601)) self.table.SetCellValue( row=row_index, col=_COL_IDX_STATUS, diff --git a/src/gui/panels/specialized/calibration_result_table.py b/src/gui/panels/specialized/calibration_result_table.py index e782e12..d7e69e9 100644 --- a/src/gui/panels/specialized/calibration_result_table.py +++ b/src/gui/panels/specialized/calibration_result_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector.structures import CalibrationResultMetadata +from src.common import IntrinsicCalibrator from typing import Final import wx @@ -12,7 +12,7 @@ _COL_LABELS: Final[list[str]] = ["Identifier", "Label", "Timestamp", "Status"] -class CalibrationResultTable(RowSelectionTable[CalibrationResultMetadata]): +class 
CalibrationResultTable(RowSelectionTable[IntrinsicCalibrator.ResultMetadata]): def __init__( self, parent: wx.Window, @@ -26,7 +26,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: CalibrationResultMetadata + row_content: IntrinsicCalibrator.ResultMetadata ): self.table.SetCellValue( row=row_index, diff --git a/src/gui/panels/specialized/connection_table.py b/src/gui/panels/specialized/connection_table.py index ff13e86..7676df5 100644 --- a/src/gui/panels/specialized/connection_table.py +++ b/src/gui/panels/specialized/connection_table.py @@ -1,6 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.controller import \ - ConnectionReport +from src.controller import Connection from typing import Final import wx @@ -14,7 +13,7 @@ _COL_LABELS: Final[list[str]] = ["Label", "Role", "IP Address", "Port", "Status"] -class ConnectionTable(RowSelectionTable[ConnectionReport]): +class ConnectionTable(RowSelectionTable[Connection.Report]): def __init__( self, parent: wx.Window, @@ -28,7 +27,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: ConnectionReport + row_content: Connection.Report ): self.table.SetCellValue( row=row_index, diff --git a/src/gui/panels/specialized/graphics_renderer.py b/src/gui/panels/specialized/graphics_renderer.py index 9ca7390..6c7c8ec 100644 --- a/src/gui/panels/specialized/graphics_renderer.py +++ b/src/gui/panels/specialized/graphics_renderer.py @@ -1,4 +1,4 @@ -from src.common.structures import Matrix4x4 +from src.common import Matrix4x4 from src.gui.graphics import Constants, FileIO, Material, Model, Shader import datetime import hjson @@ -129,8 +129,8 @@ def __init__( handler=self._on_resize) self._context = GLContext(win=self) - self._last_render_datetime_utc = datetime.datetime.utcnow() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_render_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) + self._last_update_datetime_utc = 
datetime.datetime.now(tz=datetime.timezone.utc) self._shaders = set() self._model_dictionary = dict() self._scene_objects = list() @@ -208,8 +208,8 @@ def load_models_into_context_from_data_path(self) -> dict[str, Model]: geometry_filename = f"{model_part_io.geometry_label}.stl" geometry_filepath = os.path.join(geometry_path, geometry_filename) geometry = stl.mesh.Mesh.from_file(geometry_filepath) - vertices: list[numpy.array] = list() - normals: list[numpy.array] = list() + vertices: list[numpy.ndarray] = list() + normals: list[numpy.ndarray] = list() triangles: list[list[int]] = list() for i, face in enumerate(geometry.vectors): for vertex in face: @@ -243,10 +243,10 @@ def render(self): scene_object.model.draw(translation=translation, rotation_quaternion=list(rotation_quaternion)) self.SwapBuffers() - self._last_render_datetime_utc = datetime.datetime.utcnow() + self._last_render_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) def _compute_seconds_since_last_update(self) -> float: - return (datetime.datetime.utcnow() - self._last_update_datetime_utc).total_seconds() + return (datetime.datetime.now(tz=datetime.timezone.utc) - self._last_update_datetime_utc).total_seconds() @staticmethod def _compute_mouse_deltas( @@ -337,7 +337,7 @@ def _on_mouse_moved(self, event: wx.MouseEvent): numpy.array(self._perspective_target) + delta_y_millimeters * translation_y_vector) self._update_world_to_view() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_update_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) def _on_mouse_wheel(self, event: wx.MouseEvent): delta_time_seconds: float = self._compute_seconds_since_last_update() @@ -358,7 +358,7 @@ def _on_mouse_wheel(self, event: wx.MouseEvent): numpy.array(self._perspective_target) - delta_z_millimeters * translation_z_vector) self._update_world_to_view() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_update_datetime_utc = 
datetime.datetime.now(tz=datetime.timezone.utc) def _on_resize(self, _evt: wx.SizeEvent): self._update_viewport() diff --git a/src/gui/panels/specialized/log_panel.py b/src/gui/panels/specialized/log_panel.py index 5f18c49..3b2bf32 100644 --- a/src/gui/panels/specialized/log_panel.py +++ b/src/gui/panels/specialized/log_panel.py @@ -1,4 +1,4 @@ -from src.common.structures import StatusMessage +from src.common import StatusMessage import wx import wx.grid diff --git a/src/detector/implementations/__init__.py b/src/implementations/__init__.py similarity index 100% rename from src/detector/implementations/__init__.py rename to src/implementations/__init__.py diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py new file mode 100644 index 0000000..fb3458d --- /dev/null +++ b/src/implementations/annotator_aruco_opencv.py @@ -0,0 +1,88 @@ +from .common_aruco_opencv import ArucoOpenCVCommon +from src.common import \ + Annotator, \ + Annotation, \ + KeyValueMetaAny, \ + KeyValueSimpleAny, \ + MCTAnnotatorRuntimeError, \ + SeverityLabel, \ + StatusMessageSource +import cv2.aruco +import datetime +import logging +import numpy +from typing import Optional + + +logger = logging.getLogger(__name__) + + +class ArucoOpenCVAnnotator(Annotator): + + _aruco_dictionary: Optional # created by OpenCV, type cv2.aruco.Dictionary + _aruco_parameters: ... 
# created by OpenCV, type cv2.aruco.DetectorParameters + _snapshots_identified: list[Annotation] # Markers that are determined to be valid, and are identified + _snapshots_unidentified: list[Annotation] # Looked at first like markers but got filtered out + _update_timestamp_utc: datetime.datetime + + def __init__( + self, + configuration: Annotator.Configuration, + status_message_source: StatusMessageSource + ): + super().__init__( + configuration=configuration, + status_message_source=status_message_source) + + self._aruco_dictionary = ArucoOpenCVCommon.standard_aruco_dictionary() + self._aruco_parameters = ArucoOpenCVCommon.standard_aruco_detection_parameters() + self._snapshots_identified = list() + self._snapshots_unidentified = list() + self._update_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + self.set_status(Annotator.Status.RUNNING) # Always running + + def get_changed_timestamp(self) -> datetime.datetime: + return self._update_timestamp_utc + + def get_markers_detected(self) -> list[Annotation]: + return self._snapshots_identified + + def get_markers_rejected(self) -> list[Annotation]: + return self._snapshots_unidentified + + def get_parameters(self) -> list[KeyValueMetaAny]: + return ArucoOpenCVCommon.assign_aruco_detection_parameters_to_key_value_list(self._aruco_parameters) + + @staticmethod + def get_type_identifier() -> str: + return "aruco_opencv" + + # noinspection DuplicatedCode + def set_parameters( + self, + parameters: list[KeyValueSimpleAny] + ) -> None: + mismatched_keys: list[str] = ArucoOpenCVCommon.assign_key_value_list_to_aruco_detection_parameters( + detection_parameters=self._aruco_parameters, + key_value_list=parameters) + if len(mismatched_keys) > 0: + raise MCTAnnotatorRuntimeError( + message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") + + def update( + self, + image: numpy.ndarray + ) -> None: + if self._aruco_dictionary is None: + message: str = "No 
ArUco dictionary has been set." + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) + self.set_status(Annotator.Status.FAILURE) + return + image_greyscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + self._snapshots_identified, self._snapshots_unidentified = ArucoOpenCVCommon.annotations_from_greyscale_image( + aruco_detector_parameters=self._aruco_parameters, + aruco_dictionary=self._aruco_dictionary, + image_greyscale=image_greyscale) + self._update_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/camera_opencv_capture_device.py b/src/implementations/camera_opencv_capture_device.py similarity index 86% rename from src/detector/implementations/camera_opencv_capture_device.py rename to src/implementations/camera_opencv_capture_device.py index 587061c..cc9f1e4 100644 --- a/src/detector/implementations/camera_opencv_capture_device.py +++ b/src/implementations/camera_opencv_capture_device.py @@ -1,12 +1,6 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractCamera -from ..structures import \ - CameraConfiguration, \ - CameraStatus from src.common import \ - StandardResolutions, \ - StatusMessageSource -from src.common.structures import \ + Camera, \ + ImageUtils, \ ImageResolution, \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ @@ -18,12 +12,15 @@ KeyValueMetaBool, \ KeyValueMetaEnum, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTCameraRuntimeError, \ + StatusMessageSource, SeverityLabel import cv2 import datetime import logging import numpy import os +from pydantic import Field from typing import Final @@ -35,7 +32,8 @@ # This list is by no means exhaustive, but it should probably # capture a reasonable cross-section of commonly-used camera image resolutions. # Ideally we can query the camera/driver for supported resolutions and use that instead of this list. 
-_CAMERA_RESOLUTION_ALLOWABLE: Final[list[str]] = [str(resolution) for resolution in StandardResolutions.as_list()] +_CAMERA_RESOLUTION_ALLOWABLE: Final[list[str]] = [ + str(resolution) for resolution in ImageUtils.StandardResolutions.as_list()] _CAMERA_FPS_KEY: Final[str] = "FramesPerSecond" _CAMERA_FPS_DEFAULT: Final[float] = 30.0 _CAMERA_FPS_RANGE_MINIMUM: Final[float] = 1.0 @@ -69,7 +67,13 @@ _CAMERA_GAMMA_RANGE_MAXIMUM: Final[int] = 300 -class OpenCVCaptureDeviceCamera(AbstractCamera): +class _Configuration(Camera.Configuration): + capture_device: int = Field() # Not used by all drivers (notably it IS used by OpenCV) + + +class OpenCVCaptureDeviceCamera(Camera): + + Configuration: type[_Configuration] = _Configuration _capture: cv2.VideoCapture | None _capture_device_id: str | int @@ -79,17 +83,17 @@ class OpenCVCaptureDeviceCamera(AbstractCamera): def __init__( self, - configuration: CameraConfiguration, + configuration: Camera.Configuration, status_message_source: StatusMessageSource ): super().__init__( configuration=configuration, status_message_source=status_message_source) self._image = None - self._image_timestamp_utc = datetime.datetime.min + self._image_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._capture = None self._capture_device_id = configuration.capture_device - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def __del__(self): if self._capture is not None: @@ -100,19 +104,19 @@ def get_changed_timestamp(self) -> datetime.datetime: def get_image(self) -> numpy.ndarray: if self._image is None: - raise MCTDetectorRuntimeError(message="There is no captured image.") + raise MCTCameraRuntimeError(message="There is no captured image.") return self._image def get_resolution(self) -> ImageResolution: if self._capture is None: - raise MCTDetectorRuntimeError(message="The camera is not active, and resolution cannot be retrieved.") + raise MCTCameraRuntimeError(message="The camera is not 
active, and resolution cannot be retrieved.") return ImageResolution( x_px=int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)), y_px=int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) def get_parameters(self) -> list[KeyValueMetaAbstract]: if self._capture is None: - raise MCTDetectorRuntimeError(message="The camera is not active, and properties cannot be retrieved.") + raise MCTCameraRuntimeError(message="The camera is not active, and properties cannot be retrieved.") return_value: list[KeyValueMetaAbstract] = list() @@ -183,7 +187,7 @@ def get_type_identifier() -> str: def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: if self._capture is None: - raise MCTDetectorRuntimeError(message="Capture is None.") + raise MCTCameraRuntimeError(message="Capture is None.") mismatched_keys: list[str] = list() @@ -239,7 +243,7 @@ def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: mismatched_keys.append(key_value.key) if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") def start(self) -> None: @@ -253,13 +257,13 @@ def start(self) -> None: elif os.name == "posix": self._capture = cv2.VideoCapture(self._capture_device_id, cv2.CAP_V4L2) else: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The current platform ({os.name}) is not supported.") # NOTE: The USB3 cameras bought for this project appear to require some basic parameters to be set, # otherwise frame grab results in error default_resolution: ImageResolution = ImageResolution.from_str(_CAMERA_RESOLUTION_DEFAULT) - self._capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')) + self._capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')) self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, float(default_resolution.x_px)) self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 
float(default_resolution.y_px)) self._capture.set(cv2.CAP_PROP_FPS, float(_CAMERA_FPS_DEFAULT)) @@ -275,29 +279,33 @@ def start(self) -> None: self._capture.set(cv2.CAP_PROP_SHARPNESS, float(_CAMERA_SHARPNESS_DEFAULT)) self._capture.set(cv2.CAP_PROP_GAMMA, float(_CAMERA_GAMMA_DEFAULT)) - self.set_status(CameraStatus.RUNNING) + self.set_status(Camera.Status.RUNNING) def stop(self) -> None: if self._capture is not None: self._capture.release() self._capture = None - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def update(self) -> None: grabbed_frame: bool grabbed_frame = self._capture.grab() if not grabbed_frame: message: str = "Failed to grab frame." - self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) + self.set_status(Camera.Status.FAILURE) + raise MCTCameraRuntimeError(message=message) retrieved_frame: bool retrieved_frame, self._image = self._capture.retrieve() if not retrieved_frame: message: str = "Failed to retrieve frame." 
- self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) + self.set_status(Camera.Status.FAILURE) + raise MCTCameraRuntimeError(message=message) - self._image_timestamp_utc = datetime.datetime.utcnow() + self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/camera_picamera2.py b/src/implementations/camera_picamera2.py similarity index 91% rename from src/detector/implementations/camera_picamera2.py rename to src/implementations/camera_picamera2.py index 52773ff..220b167 100644 --- a/src/detector/implementations/camera_picamera2.py +++ b/src/implementations/camera_picamera2.py @@ -1,10 +1,5 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractCamera -from ..structures import \ - CameraConfiguration, \ - CameraStatus -from src.common import StatusMessageSource -from src.common.structures import \ +from src.common import \ + Camera, \ ImageResolution, \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ @@ -14,7 +9,10 @@ KeyValueMetaAbstract, \ KeyValueMetaBool, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTCameraRuntimeError, \ + SeverityLabel, \ + StatusMessageSource import datetime import logging import numpy @@ -75,7 +73,7 @@ _PICAMERA2_SHARPNESS_KEY: Final[str] = "Sharpness" -class Picamera2Camera(AbstractCamera): +class Picamera2Camera(Camera): _camera: Picamera2 _camera_configuration: Picamera2Configuration @@ -85,29 +83,29 @@ class Picamera2Camera(AbstractCamera): def __init__( self, - configuration: CameraConfiguration, + configuration: Camera.Configuration, status_message_source: StatusMessageSource ): super().__init__( configuration=configuration, status_message_source=status_message_source) self._image = None - self._image_timestamp_utc = datetime.datetime.min + 
self._image_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._camera = Picamera2() self._camera_configuration = self._camera.create_video_configuration() - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def get_changed_timestamp(self) -> datetime.datetime: return self._image_timestamp_utc def get_image(self) -> numpy.ndarray: if self._image is None: - raise MCTDetectorRuntimeError(message="There is no captured image.") + raise MCTCameraRuntimeError(message="There is no captured image.") return self._image def get_parameters(self, **_kwargs) -> list[KeyValueMetaAbstract]: - if self.get_status() != CameraStatus.RUNNING: - raise MCTDetectorRuntimeError(message="The capture is not active, and properties cannot be retrieved.") + if self.get_status() != Camera.Status.RUNNING: + raise MCTCameraRuntimeError(message="The capture is not active, and properties cannot be retrieved.") current_controls: dict = { # Custom settings shall override default values @@ -223,10 +221,10 @@ def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: mismatched_keys.append(key_value.key) if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - if self.get_status() == CameraStatus.RUNNING: + if self.get_status() == Camera.Status.RUNNING: self._camera.stop() self._camera.configure(self._camera_configuration) self._camera.start() @@ -260,12 +258,12 @@ def start(self) -> None: self._camera.configure(self._camera_configuration) self._camera.start() self._image = self._camera.capture_array() - self.set_status(CameraStatus.RUNNING) + self.set_status(Camera.Status.RUNNING) def stop(self) -> None: if self._image is not None: self._image = None - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) self._camera.stop() def update(self) -> None: @@ -273,8 +271,10 
@@ def update(self) -> None: if self._image is None: message: str = "Failed to grab frame." - self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) + self.set_status(Camera.Status.FAILURE) + raise MCTCameraRuntimeError(message=message) - self._image_timestamp_utc = datetime.datetime.utcnow() + self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py new file mode 100644 index 0000000..290ac27 --- /dev/null +++ b/src/implementations/common_aruco_opencv.py @@ -0,0 +1,715 @@ +from src.common import \ + Annotation, \ + KeyValueMetaAny, \ + KeyValueMetaBool, \ + KeyValueMetaEnum, \ + KeyValueMetaFloat, \ + KeyValueMetaInt, \ + KeyValueSimpleAbstract, \ + KeyValueSimpleAny, \ + KeyValueSimpleBool, \ + KeyValueSimpleFloat, \ + KeyValueSimpleInt, \ + KeyValueSimpleString, \ + Landmark, \ + MathUtils, \ + MCTSerializationError, \ + Target +import cv2.aruco +import logging +import numpy +from typing import Final, get_args, Literal + + +logger = logging.getLogger(__name__) + + +# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html +# for documentation on individual parameters + +# Adaptive Thresholding +_KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" +_KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" +_KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" +_KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" +# Contour Filtering +_KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio +_KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" +_KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square 
tolerance ratio +_KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio +_KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio +_KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels +# Bits Extraction +_KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (px) +_KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev +_KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate +_KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio +# Marker Identification +_KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate +_KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rat +_KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" +_KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" +_KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" +_KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" +_KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" +# April Tag Only +_KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" +_KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" +_KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" +_KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" +_KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" +_KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" +_KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" +_KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" +# ArUco 3 +_KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" +_KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = 
"minMarkerLengthRatioOriginalImg" +_KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" + + +class ArucoOpenCVCommon: + """ + A "class" to group related static functions and constants, like in a namespace. + The class itself is not meant to be instantiated. + """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + + KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN + KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX + KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP + KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = _KEY_ADAPTIVE_THRESH_CONSTANT + KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = _KEY_MIN_MARKER_PERIMETER_RATE + KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = _KEY_MAX_MARKER_PERIMETER_RATE + KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = _KEY_POLYGONAL_APPROX_ACCURACY_RATE + KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = _KEY_MIN_CORNER_DISTANCE_RATE + KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = _KEY_MIN_MARKER_DISTANCE_RATE + KEY_MIN_DISTANCE_TO_BORDER: Final[str] = _KEY_MIN_DISTANCE_TO_BORDER + KEY_MARKER_BORDER_BITS: Final[str] = _KEY_MARKER_BORDER_BITS + KEY_MIN_OTSU_STDDEV: Final[str] = _KEY_MIN_OTSU_STDDEV + KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = _KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL + KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = _KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL + KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = _KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE + KEY_ERROR_CORRECTION_RATE: Final[str] = _KEY_ERROR_CORRECTION_RATE + KEY_DETECT_INVERTED_MARKER: Final[str] = _KEY_DETECT_INVERTED_MARKER + KEY_CORNER_REFINEMENT_METHOD: Final[str] = _KEY_CORNER_REFINEMENT_METHOD + KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = _KEY_CORNER_REFINEMENT_WIN_SIZE + KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = _KEY_CORNER_REFINEMENT_MAX_ITERATIONS + 
KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = _KEY_CORNER_REFINEMENT_MIN_ACCURACY + KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = _KEY_APRIL_TAG_CRITICAL_RAD + KEY_APRIL_TAG_DEGLITCH: Final[str] = _KEY_APRIL_TAG_DEGLITCH + KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = _KEY_APRIL_TAG_MAX_LINE_FIT_MSE + KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = _KEY_APRIL_TAG_MAX_N_MAXIMA + KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = _KEY_APRIL_TAG_MIN_CLUSTER_PIXELS + KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = _KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF + KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = _KEY_APRIL_TAG_QUAD_DECIMATE + KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = _KEY_APRIL_TAG_QUAD_SIGMA + KEY_USE_ARUCO_3_DETECTION: Final[str] = _KEY_USE_ARUCO_3_DETECTION + KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = _KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG + KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = _KEY_MIN_SIDE_LENGTH_CANONICAL_IMG + + CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] + CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' + CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' + CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' + CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' + + CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { + "NONE": cv2.aruco.CORNER_REFINE_NONE, + "SUBPIX": cv2.aruco.CORNER_REFINE_SUBPIX, + "CONTOUR": cv2.aruco.CORNER_REFINE_CONTOUR, + "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} + + CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, CornerRefinementMethod] = { + cv2.aruco.CORNER_REFINE_NONE: "NONE", + cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", + cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", + cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} + + class CharucoBoard: + dictionary_name: str + square_count_x: int + square_count_y: int + square_size_px: int + marker_size_px: int + px_per_mm: float + + def __init__(self): + self.dictionary_name = "DICT_4X4_100" + 
self.square_count_x = 8 + self.square_count_y = 10 + self.square_size_px = 800 + self.marker_size_px = 400 + self.px_per_mm = 40 + + def aruco_dictionary(self) -> cv2.aruco.Dictionary: + if self.dictionary_name != "DICT_4X4_100": + raise NotImplementedError("Only DICT_4X4_100 is currently implemented") + aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) + return aruco_dictionary + + def as_target( + self, + target_label: str + ) -> Target: + """ + Note that the coordinates assume the same axes as get_marker_center_points, + but the origin is in the center of the board, not the bottom-left corner. + """ + corner_points: list[list[float]] = self.get_marker_corner_points() + marker_count: int = len(corner_points) // 4 + landmarks: list[Landmark] = list() + for marker_index in range(0, marker_count): + for corner_index in range(0, 4): + landmarks.append(Landmark( + feature_label=f"{marker_index}{Landmark.RELATION_CHARACTER}{corner_index}", + x=corner_points[marker_index*4+corner_index][0], + y=corner_points[marker_index*4+corner_index][1], + z=corner_points[marker_index*4+corner_index][2])) + return Target( + label=target_label, + landmarks=landmarks) + + def size_px(self) -> tuple[float, float]: + board_size_x_px = self.square_count_x * self.square_size_px + board_size_y_px = self.square_count_y * self.square_size_px + return board_size_x_px, board_size_y_px + + def size_mm(self) -> tuple[float, float]: + board_size_x_mm = self.square_count_x * self.square_size_px / self.px_per_mm + board_size_y_mm = self.square_count_y * self.square_size_px / self.px_per_mm + return board_size_x_mm, board_size_y_mm + + def create_board(self) -> ...: # type cv2.aruco.CharucoBoard + charuco_board = cv2.aruco.CharucoBoard( + size=(self.square_count_x, self.square_count_y), + squareLength=self.square_size_px, + markerLength=self.marker_size_px, + dictionary=self.aruco_dictionary()) + return charuco_board + + def get_marker_center_points(self) -> 
list[list[float]]: + """ + Note that the coordinates assume (based on portrait orientation): + origin: at bottom-left of board + x-axis: goes right + y-axis: goes up the page + z-axis: comes out of the image and toward the viewer + """ + points = [] + for y in range(self.square_count_y): + for x in range(self.square_count_x): + if (x + y) % 2 == 1: # Only add the points for the white squares + point_x = (x + 0.5) * self.square_size_px / self.px_per_mm + point_y = (self.square_count_y - y - 0.5) * self.square_size_px / self.px_per_mm + points.append([point_x, point_y, 0.0]) + return points + + def get_marker_corner_points(self) -> list[list[float]]: + """ + Note that the coordinates assume the same axes as get_marker_center_points, + but the origin is in the center of the board, not the bottom-left corner. + """ + points = [] + marker_size_mm: float = self.marker_size_px / self.px_per_mm + square_size_mm: float = self.square_size_px / self.px_per_mm + for y_sq in range(self.square_count_y): + for x_sq in range(self.square_count_x): + if (x_sq + y_sq) % 2 == 1: # Only add the points for the white squares + x_sq_centered: float = x_sq - (self.square_count_x / 2.0) + y_sq_centered: float = y_sq - (self.square_count_y / 2.0) + for corner_index in range(0, 4): + x_mm: float = (x_sq_centered + 0.5) * square_size_mm + if corner_index == 0 or corner_index == 3: + x_mm -= (marker_size_mm / 2.0) + else: + x_mm += (marker_size_mm / 2.0) + y_mm: float = (-(y_sq_centered + 0.5)) * square_size_mm + if corner_index == 0 or corner_index == 1: + y_mm += (marker_size_mm / 2.0) + else: + y_mm -= (marker_size_mm / 2.0) + z_mm: float = 0.0 + points.append([x_mm, y_mm, z_mm]) + return points + + def get_marker_ids(self) -> list[int]: + num_markers = self.square_count_x * self.square_count_y // 2 + return list(range(num_markers)) + + @staticmethod + def annotations_from_greyscale_image( + aruco_detector_parameters: cv2.aruco.DetectorParameters, + aruco_dictionary: cv2.aruco.Dictionary, + 
image_greyscale: numpy.ndarray + ) -> tuple[list[Annotation], list[Annotation]]: + (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( + image=image_greyscale, + dictionary=aruco_dictionary, + parameters=aruco_detector_parameters) + + detected_annotations: list[Annotation] = list() + # note: detected_indices is (inconsistently) None sometimes if nothing is detected + if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: + detected_count = detected_dictionary_indices.size + # Shape of some output was previously observed to (also) be inconsistent... make it consistent here: + detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_count, 4, 2)) + detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) + for detected_index, detected_id in enumerate(detected_dictionary_indices): + for corner_index in range(4): + detected_label: str = f"{detected_id}{Annotation.RELATION_CHARACTER}{corner_index}" + detected_annotations.append(Annotation( + feature_label=detected_label, + x_px=float(detected_corner_points_px[detected_index][corner_index][0]), + y_px=float(detected_corner_points_px[detected_index][corner_index][1]))) + + rejected_annotations: list[Annotation] = list() + if rejected_corner_points_raw: + rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) + for rejected_index in range(rejected_corner_points_px.shape[0]): + for corner_index in range(4): + rejected_annotations.append(Annotation( + feature_label=Annotation.UNIDENTIFIED_LABEL, + x_px=float(rejected_corner_points_px[rejected_index][corner_index][0]), + y_px=float(rejected_corner_points_px[rejected_index][corner_index][1]))) + + return detected_annotations, rejected_annotations + + @staticmethod + def assign_aruco_detection_parameters_to_key_value_list( + detection_parameters: ... 
# cv2.aruco.DetectionParameters + ) -> list[KeyValueMetaAny]: + + return_value: list[KeyValueMetaAny] = list() + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, + value=detection_parameters.adaptiveThreshWinSizeMin, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, + value=detection_parameters.adaptiveThreshWinSizeMax, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, + value=detection_parameters.adaptiveThreshWinSizeStep, + range_minimum=1, + range_maximum=99, + range_step=2)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_ADAPTIVE_THRESH_CONSTANT, + value=detection_parameters.adaptiveThreshConstant, + range_minimum=-255.0, + range_maximum=255.0, + range_step=1.0)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_PERIMETER_RATE, + value=detection_parameters.minMarkerPerimeterRate, + range_minimum=0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MAX_MARKER_PERIMETER_RATE, + value=detection_parameters.maxMarkerPerimeterRate, + range_minimum=0.0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_POLYGONAL_APPROX_ACCURACY_RATE, + value=detection_parameters.polygonalApproxAccuracyRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_CORNER_DISTANCE_RATE, + value=detection_parameters.minCornerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_DISTANCE_RATE, + value=detection_parameters.minMarkerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MIN_DISTANCE_TO_BORDER, + value=detection_parameters.minDistanceToBorder, + range_minimum=0, + 
range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MARKER_BORDER_BITS, + value=detection_parameters.markerBorderBits, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_OTSU_STDDEV, + value=detection_parameters.minOtsuStdDev, + range_minimum=0.0, + range_maximum=256.0, + range_step=1.0)) + + return_value.append(KeyValueMetaInt( + key=_KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, + value=detection_parameters.perspectiveRemovePixelPerCell, + range_minimum=1, + range_maximum=20)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, + value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, + range_minimum=0.0, + range_maximum=0.5, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, + value=detection_parameters.maxErroneousBitsInBorderRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_ERROR_CORRECTION_RATE, + value=detection_parameters.errorCorrectionRate, + range_minimum=-0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaBool( + key=_KEY_DETECT_INVERTED_MARKER, + value=detection_parameters.detectInvertedMarker)) + + if detection_parameters.cornerRefinementMethod not in \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: + message: str = f"Corner refinement method appears to be set to an invalid value: " \ + f"{detection_parameters.cornerRefinementMethod}." 
+ logger.error(message) + raise MCTSerializationError(message=message) + corner_refinement_method_text: ArucoOpenCVCommon.CornerRefinementMethod = \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[ + detection_parameters.cornerRefinementMethod] + return_value.append(KeyValueMetaEnum( + key=_KEY_CORNER_REFINEMENT_METHOD, + value=corner_refinement_method_text, + allowable_values=list(get_args(ArucoOpenCVCommon.CornerRefinementMethod)))) + + return_value.append(KeyValueMetaInt( + key=_KEY_CORNER_REFINEMENT_WIN_SIZE, + value=detection_parameters.cornerRefinementWinSize, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaInt( + key=_KEY_CORNER_REFINEMENT_MAX_ITERATIONS, + value=detection_parameters.cornerRefinementMaxIterations, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_CORNER_REFINEMENT_MIN_ACCURACY, + value=detection_parameters.cornerRefinementMinAccuracy, + range_minimum=0.0, + range_maximum=5.0, + range_step=0.1)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_CRITICAL_RAD, + value=detection_parameters.aprilTagCriticalRad, + range_minimum=-0.0, + range_maximum=numpy.pi, + range_step=numpy.pi / 20.0)) + + return_value.append(KeyValueMetaBool( + key=_KEY_APRIL_TAG_DEGLITCH, + value=detection_parameters.aprilTagDeglitch)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_MAX_LINE_FIT_MSE, + value=detection_parameters.aprilTagMaxLineFitMse, + range_minimum=0.0, + range_maximum=512.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MAX_N_MAXIMA, + value=detection_parameters.aprilTagMaxNmaxima, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, + value=detection_parameters.aprilTagMinClusterPixels, + range_minimum=0, + range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, + 
value=detection_parameters.aprilTagMinWhiteBlackDiff, + range_minimum=0, + range_maximum=256)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_QUAD_DECIMATE, + value=detection_parameters.aprilTagQuadDecimate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_QUAD_SIGMA, + value=detection_parameters.aprilTagQuadSigma, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + # Note: a relatively recent addition to OpenCV, may not be available in some python versions + if hasattr(detection_parameters, "useAruco3Detection"): + return_value.append(KeyValueMetaBool( + key=_KEY_USE_ARUCO_3_DETECTION, + value=detection_parameters.useAruco3Detection)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, + value=detection_parameters.minMarkerLengthRatioOriginalImg, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, + value=detection_parameters.minSideLengthCanonicalImg, + range_minimum=1, + range_maximum=512)) + + return return_value + + @staticmethod + def assign_key_value_list_to_aruco_detection_parameters( + detection_parameters: ..., # cv2.aruco.DetectionParameters + key_value_list: list[KeyValueSimpleAny] + ) -> list[str]: + """ + Returns list of mismatched keys + """ + mismatched_keys: list[str] = list() + key_value: KeyValueSimpleAbstract + for key_value in key_value_list: + if key_value.key == _KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMin = key_value.value + elif key_value.key == _KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMax = key_value.value + elif key_value.key 
== _KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeStep = key_value.value + elif key_value.key == _KEY_ADAPTIVE_THRESH_CONSTANT: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshConstant = key_value.value + elif key_value.key == _KEY_MIN_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerPerimeterRate = key_value.value + elif key_value.key == _KEY_MAX_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxMarkerPerimeterRate = key_value.value + elif key_value.key == _KEY_POLYGONAL_APPROX_ACCURACY_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.polygonalApproxAccuracyRate = key_value.value + elif key_value.key == _KEY_MIN_CORNER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minCornerDistanceRate = key_value.value + elif key_value.key == _KEY_MIN_MARKER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerDistanceRate = key_value.value + elif key_value.key == _KEY_MIN_DISTANCE_TO_BORDER: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minDistanceToBorder = key_value.value + elif key_value.key == _KEY_MARKER_BORDER_BITS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.markerBorderBits = key_value.value + elif 
key_value.key == _KEY_MIN_OTSU_STDDEV: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minOtsuStdDev = key_value.value + elif key_value.key == _KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemovePixelPerCell = key_value.value + elif key_value.key == _KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value + elif key_value.key == _KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxErroneousBitsInBorderRate = key_value.value + elif key_value.key == _KEY_ERROR_CORRECTION_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.errorCorrectionRate = key_value.value + elif key_value.key == _KEY_DETECT_INVERTED_MARKER: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.detectInvertedMarker = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_METHOD: + if not isinstance(key_value, KeyValueSimpleString): + mismatched_keys.append(key_value.key) + continue + corner_refinement_method: str = key_value.value + if corner_refinement_method in ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: + # noinspection PyTypeChecker + detection_parameters.cornerRefinementMethod = \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[ + corner_refinement_method] + else: + raise MCTSerializationError( + message=f"Failed to find corner refinement method {corner_refinement_method}.") + elif key_value.key == 
_KEY_CORNER_REFINEMENT_WIN_SIZE: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementWinSize = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_MAX_ITERATIONS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMaxIterations = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_MIN_ACCURACY: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMinAccuracy = key_value.value + elif key_value.key == _KEY_APRIL_TAG_CRITICAL_RAD: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagCriticalRad = key_value.value + elif key_value.key == _KEY_APRIL_TAG_DEGLITCH: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagDeglitch = int(key_value.value) + elif key_value.key == _KEY_APRIL_TAG_MAX_LINE_FIT_MSE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMaxLineFitMse = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MAX_N_MAXIMA: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMaxNmaxima = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinClusterPixels = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value 
+ elif key_value.key == _KEY_APRIL_TAG_QUAD_DECIMATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadDecimate = key_value.value + elif key_value.key == _KEY_APRIL_TAG_QUAD_SIGMA: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadSigma = key_value.value + elif key_value.key == _KEY_USE_ARUCO_3_DETECTION: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.useAruco3Detection = key_value.value + elif key_value.key == _KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value + elif key_value.key == _KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minSideLengthCanonicalImg = key_value.value + else: + mismatched_keys.append(key_value.key) + return mismatched_keys + + @staticmethod + def standard_aruco_dictionary() -> cv2.aruco.Dictionary: + return cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) + + @staticmethod + def standard_aruco_detection_parameters() -> cv2.aruco.DetectorParameters: + aruco_detector_parameters: cv2.aruco.DetectorParameters = cv2.aruco.DetectorParameters() + # TODO + # Curiously and counterintuitively, at least in testing with extrinsic calibration, + # it looks like no corner refinement may yield more accurate results than subpixel. + # More investigation seems warranted. 
+ # aruco_detector_parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX + return aruco_detector_parameters + + @staticmethod + def target_from_marker_parameters( + base_label : str, + marker_size: float + ) -> Target: + """ + :param base_label: Should correspond to the index of the ArUco marker in the dictionary + :param marker_size: + """ + corner_points: list[list[float]] = MathUtils.square_marker_corner_points(marker_size=marker_size) + landmarks: list[Landmark] = [ + Landmark( + feature_label=f"{base_label}{Landmark.RELATION_CHARACTER}{corner_index}", + x=corner_point[0], y=corner_point[1], z=corner_point[2]) + for corner_index, corner_point in enumerate(corner_points)] + target: Target = Target(label=base_label, landmarks=landmarks) + return target diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py new file mode 100644 index 0000000..c5b846a --- /dev/null +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -0,0 +1,317 @@ +from .common_aruco_opencv import ArucoOpenCVCommon +from src.common import \ + Annotation, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ + ExtrinsicCalibrator, \ + FeatureRay, \ + IntrinsicParameters, \ + Landmark, \ + MathUtils, \ + Matrix4x4, \ + Target +import cv2 +import cv2.aruco +import datetime +import numpy +from pydantic import BaseModel, Field +from scipy.spatial.transform import Rotation + + +class _ImageData(BaseModel): + """ + Helper structure - data stored for each image + """ + detector_label: str = Field() + annotations: list[Annotation] = Field() + rays: list[FeatureRay] = Field(default_factory=list) + + def annotations_as_points(self) -> list[list[float]]: + return [ + [annotation.x_px, annotation.y_px] + for annotation in self.annotations] + + +class _FeatureData(BaseModel): + """ + Helper structure - data stored for each feature + """ + feature_label: str = Field() + position: Landmark | None = Field(default=None) # None 
means it has not (or cannot) be calculated + + +class _TimestampData(BaseModel): + """ + Helper structure - data stored for each unique timestamp + """ + timestamp_utc_iso8601: str = Field() + images: list[_ImageData] = Field(default_factory=list) + features: list[_FeatureData] = Field(default_factory=list) + + +class _DetectorData(BaseModel): + """ + Helper structure - data stored for each detector + """ + detector_label: str = Field() + intrinsic_parameters: IntrinsicParameters = Field() + initial_to_reference: Matrix4x4 | None = Field(default=None) # Stored primarily for analyses + refined_to_reference: Matrix4x4 | None = Field(default=None) + + +class _CalibrationData(BaseModel): + """ + Helper structure - container for all things related to calibration + """ + timestamps: list[_TimestampData] = Field(default_factory=list) + detectors: list[_DetectorData] = Field(default_factory=list) + + def get_detector_container( + self, + detector_label: str + ) -> _DetectorData: + for detector in self.detectors: + if detector.detector_label == detector_label: + return detector + raise IndexError() + + def get_feature_container( + self, + timestamp_utc_iso8601: str, + feature_label: str + ) -> _FeatureData: + for timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + for feature in timestamp.features: + if feature.feature_label == feature_label: + return feature + break + raise IndexError() + + def get_image_container( + self, + timestamp_utc_iso8601: str, + detector_label: str + ) -> _ImageData: + for timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + for image in timestamp.images: + if image.detector_label == detector_label: + return image + break + raise IndexError() + + def get_timestamp_container( + self, + timestamp_utc_iso8601: str + ) -> _TimestampData: + for timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + return timestamp + raise 
IndexError() + + +class _Configuration(ExtrinsicCalibrator.Configuration): + termination_iteration_count: int = Field(default=500) + termination_rotation_change_degrees: float = Field(default=0.05) + termination_translation_change: float = Field(default=0.5) + ray_intersection_maximum_distance: float = Field(default=50.0) + + +class CharucoOpenCVExtrinsicCalibrator(ExtrinsicCalibrator): + + Configuration: type[ExtrinsicCalibrator.Configuration] = _Configuration + configuration: _Configuration + + def __init__(self, configuration: Configuration | dict): + if isinstance(configuration, dict): + configuration = _Configuration(**configuration) + self.configuration = configuration + super().__init__(configuration) + + @staticmethod + def _annotate_image( + aruco_detector_parameters: cv2.aruco.DetectorParameters, + aruco_dictionary: cv2.aruco.Dictionary, + image_metadata: ExtrinsicCalibrator.ImageMetadata + ) -> list[Annotation]: + image_rgb = cv2.imread(image_metadata.filepath) + image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) + annotations: list[Annotation] + annotations, _ = ArucoOpenCVCommon.annotations_from_greyscale_image( + aruco_detector_parameters=aruco_detector_parameters, + aruco_dictionary=aruco_dictionary, + image_greyscale=image_greyscale) + return annotations + + def _calculate_implementation( + self, + image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] + ) -> tuple[ExtrinsicCalibration, list[ExtrinsicCalibrator.ImageMetadata]]: + charuco_spec: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() + aruco_detector_parameters: cv2.aruco.DetectorParameters = \ + ArucoOpenCVCommon.standard_aruco_detection_parameters() + aruco_dictionary: cv2.aruco.Dictionary = ArucoOpenCVCommon.standard_aruco_dictionary() + charuco_target: Target = charuco_spec.as_target(target_label="board") + + # Populate _CalibrationData structure, including detection of annotations + data: _CalibrationData = _CalibrationData() + for metadata in 
image_metadata_list: + annotations: list[Annotation] = self._annotate_image( + aruco_detector_parameters=aruco_detector_parameters, + aruco_dictionary=aruco_dictionary, + image_metadata=metadata) + image_data: _ImageData = _ImageData( + detector_label=metadata.detector_label, + annotations=annotations) + timestamp_data: _TimestampData + try: + timestamp_data = data.get_timestamp_container(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + except IndexError: + timestamp_data = _TimestampData(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + data.timestamps.append(timestamp_data) + timestamp_data.images.append(image_data) + try: + data.get_detector_container(detector_label=image_data.detector_label) + except IndexError: + detector: _DetectorData = _DetectorData( + detector_label=metadata.detector_label, + intrinsic_parameters=self.detector_intrinsics_by_label[metadata.detector_label]) + data.detectors.append(detector) + for annotation in annotations: + try: + data.get_feature_container( + timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, + feature_label=annotation.feature_label) + except IndexError: + feature_data: _FeatureData = _FeatureData(feature_label=annotation.feature_label) + timestamp_data = data.get_timestamp_container(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + timestamp_data.features.append(feature_data) + + # Initial estimate of the pose of each detector relative to first frame + first_timestamp: _TimestampData = data.get_timestamp_container( + timestamp_utc_iso8601=min([metadata.timestamp_utc_iso8601 for metadata in image_metadata_list])) + for metadata in image_metadata_list: + if metadata.timestamp_utc_iso8601 == first_timestamp.timestamp_utc_iso8601: + image_data: _ImageData = data.get_image_container( + timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, + detector_label=metadata.detector_label) + intrinsic_parameters: IntrinsicParameters = self.detector_intrinsics_by_label[metadata.detector_label] + estimated: bool + 
reference_to_initial: Matrix4x4 + estimated, reference_to_initial = MathUtils.estimate_matrix_transform_to_detector( + annotations=image_data.annotations, + landmarks=charuco_target.landmarks, + detector_intrinsics=intrinsic_parameters) + if estimated: + initial_to_reference: Matrix4x4 = reference_to_initial.inverse() + detector: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) + detector.initial_to_reference = initial_to_reference + detector.refined_to_reference = initial_to_reference + + for i in range(0, self.configuration.termination_iteration_count): + # Update each ray based on the current pose + for timestamp_data in data.timestamps: + for image_data in timestamp_data.images: + if len(image_data.annotations) == 0: + continue + detector_data: _DetectorData + try: + detector_data = data.get_detector_container(detector_label=image_data.detector_label) + except IndexError: + continue # Indicates that it was not possible to estimate the detector pose + feature_labels: list[str] = [annotation.feature_label for annotation in image_data.annotations] + ray_directions: list[list[float]] = MathUtils.convert_detector_points_to_vectors( + points=image_data.annotations_as_points(), + detector_intrinsics=detector_data.intrinsic_parameters, + detector_to_reference_matrix=detector_data.refined_to_reference) + source_point: list[float] = detector_data.refined_to_reference.get_translation() + annotation_count = len(image_data.annotations) + image_data.rays = [ + FeatureRay( + feature_label=feature_labels[annotation_index], + source_point=source_point, + direction=ray_directions[annotation_index]) + for annotation_index in range(0, annotation_count)] + # For each (timestamp, feature_label), intersect rays to get 3D positions in a common coordinate system + for timestamp_data in data.timestamps: + for feature_data in timestamp_data.features: + ray_list: list[FeatureRay] = list() + feature_label = feature_data.feature_label + for image_data in 
timestamp_data.images: + for ray in image_data.rays: + if ray.feature_label == feature_label: + ray_list.append(ray) + ray_intersection: MathUtils.RayIntersectionNOutput = MathUtils.closest_intersection_between_n_lines( + rays=ray_list, + maximum_distance=self.configuration.ray_intersection_maximum_distance) + if ray_intersection.intersection_count() > 0: + position: numpy.ndarray = ray_intersection.centroid() + feature_data.position = Landmark( + feature_label=feature_label, + x=float(position[0]), + y=float(position[1]), + z=float(position[2])) + else: + feature_data.position = None + # Use the newly-calculated 3D points together with the annotations to update the pose (PnP) + converged: bool = True # until shown otherwise + for detector_data in data.detectors: + landmarks: list[Landmark] = list() + annotations: list[Annotation] = list() + for timestamp_index, timestamp_data in enumerate(data.timestamps): + for feature_data in timestamp_data.features: + timestamped_feature_label: str = \ + f"{feature_data.feature_label}{Annotation.RELATION_CHARACTER}{timestamp_index}" + for image_data in timestamp_data.images: + if image_data.detector_label != detector_data.detector_label: + continue + for annotation in image_data.annotations: + if annotation.feature_label == feature_data.feature_label and \ + feature_data.position is not None: + landmarks.append(Landmark( + feature_label=timestamped_feature_label, + x=feature_data.position.x, + y=feature_data.position.y, + z=feature_data.position.z)) + annotations.append(Annotation( + feature_label=timestamped_feature_label, + x_px=annotation.x_px, + y_px=annotation.y_px)) + estimated: bool + reference_to_refined: Matrix4x4 + estimated, reference_to_refined = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotations, + landmarks=landmarks, + detector_intrinsics=detector_data.intrinsic_parameters) + if not estimated: + raise NotImplementedError( + "extrinsic_charuco_opencv: A detector pose was unable to get estimated. 
" + "This is not expected to occur and is not presently handled. " + "If you are seeing this, then please report that you are seeing this message.") + refined_to_reference: Matrix4x4 = reference_to_refined.inverse() + translation_change: float = numpy.linalg.norm( + numpy.asarray(refined_to_reference.get_translation()) - + numpy.asarray(detector_data.refined_to_reference.get_translation())) + old_to_refined: numpy.ndarray = numpy.matmul( + reference_to_refined.as_numpy_array(), + detector_data.refined_to_reference.as_numpy_array()) + # noinspection PyArgumentList + rotation_change_degrees: float = \ + numpy.linalg.norm(Rotation.from_matrix(old_to_refined[0:3, 0:3]).as_rotvec(degrees=True)) + detector_data.refined_to_reference = refined_to_reference + if rotation_change_degrees > self.configuration.termination_rotation_change_degrees or \ + translation_change > self.configuration.termination_translation_change: + converged = False + if converged: + break + + extrinsic_calibration: ExtrinsicCalibration = ExtrinsicCalibration( + timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), + calibrated_values=[ + ExtrinsicCalibrationDetectorResult( + detector_label=detector_data.detector_label, + detector_to_reference=detector_data.refined_to_reference) + for detector_data in data.detectors], + supplemental_data=data.model_dump()) + return extrinsic_calibration, image_metadata_list diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py new file mode 100644 index 0000000..6ad0e66 --- /dev/null +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -0,0 +1,133 @@ +from .common_aruco_opencv import ArucoOpenCVCommon +from src.common import \ + CalibrationErrorReason, \ + ImageResolution, \ + IntrinsicCalibration, \ + IntrinsicCalibrator, \ + IntrinsicParameters, \ + MCTCalibrationError +import cv2 +import cv2.aruco +import datetime +import numpy + + +class 
CharucoOpenCVIntrinsicCalibrator(IntrinsicCalibrator): + def _calculate_implementation( + self, + image_resolution: ImageResolution, + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + ) -> tuple[IntrinsicCalibration, list[IntrinsicCalibrator.ImageMetadata]]: + aruco_detector_parameters: cv2.aruco.DetectorParameters = \ + ArucoOpenCVCommon.standard_aruco_detection_parameters() + + # mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( + # detection_parameters=aruco_detector_parameters, + # key_value_list=marker_parameters) + # if len(mismatched_keys) > 0: + # raise MCTIntrinsicCalibrationError( + # message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") + + charuco_spec = ArucoOpenCVCommon.CharucoBoard() + charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() + + all_charuco_corners = list() + all_charuco_ids = list() + used_image_metadata: list[IntrinsicCalibrator.ImageMetadata] = list() + for metadata in image_metadata_list: + image_rgb = cv2.imread(metadata.filepath) + image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) + (marker_corners, marker_ids, _) = cv2.aruco.detectMarkers( + image=image_greyscale, + dictionary=charuco_spec.aruco_dictionary(), + parameters=aruco_detector_parameters) + if len(marker_corners) <= 0: + continue + used_image_metadata.append(metadata) + # Note: + # Marker corners are the corners of the markers, whereas + # ChArUco corners are the corners of the chessboard. + # ChArUco calibration function works with the corners of the chessboard. 
+ _, frame_charuco_corners, frame_charuco_ids = cv2.aruco.interpolateCornersCharuco( + markerCorners=marker_corners, + markerIds=marker_ids, + image=image_greyscale, + board=charuco_board, + ) + # Algorithm requires a minimum of 4 markers + if frame_charuco_corners is not None and len(frame_charuco_corners) >= 4: + all_charuco_corners.append(frame_charuco_corners) + all_charuco_ids.append(frame_charuco_ids) + + if len(all_charuco_corners) <= 0: + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message="The input images did not contain visible markers.") + + # outputs to be stored in these containers + calibration_result = cv2.aruco.calibrateCameraCharucoExtended( + charucoCorners=all_charuco_corners, + charucoIds=all_charuco_ids, + board=charuco_board, + imageSize=numpy.array(charuco_spec.size_mm(), dtype="int32"), # Exception if float + cameraMatrix=numpy.identity(3, dtype='f'), + distCoeffs=numpy.zeros(5, dtype='f')) + + charuco_overall_reprojection_error = calibration_result[0] + charuco_camera_matrix = calibration_result[1] + charuco_distortion_coefficients = calibration_result[2] + charuco_rotation_vectors = calibration_result[3] + charuco_translation_vectors = calibration_result[4] + charuco_intrinsic_stdevs = calibration_result[5] + charuco_extrinsic_stdevs = calibration_result[6] + charuco_reprojection_errors = calibration_result[7] + + supplemental_data: dict = { + "reprojection_error": charuco_overall_reprojection_error, + "calibrated_stdevs": [value[0] for value in charuco_intrinsic_stdevs], + # "marker_parameters": marker_parameters, + "frame_results": [{ + "image_identifier": used_image_metadata[i].identifier, + "translation": [ + charuco_translation_vectors[i][0, 0], + charuco_translation_vectors[i][1, 0], + charuco_translation_vectors[i][2, 0]], + "rotation": [ + charuco_rotation_vectors[i][0, 0], + charuco_rotation_vectors[i][1, 0], + charuco_rotation_vectors[i][2, 0]], + "translation_stdev": [ + 
charuco_extrinsic_stdevs[i * 6 + 3, 0], + charuco_extrinsic_stdevs[i * 6 + 4, 0], + charuco_extrinsic_stdevs[i * 6 + 5, 0]], + "rotation_stdev": [ + charuco_extrinsic_stdevs[i * 6 + 0, 0], + charuco_extrinsic_stdevs[i * 6 + 1, 0], + charuco_extrinsic_stdevs[i * 6 + 2, 0]], + "reprojection_error": charuco_reprojection_errors[i, 0]} + for i in range(0, len(charuco_reprojection_errors))]} + + # TODO: Assertion on size of distortion coefficients being 5? + # Note: OpenCV documentation specifies the order of distortion coefficients + # https://docs.opencv.org/4.x/d9/d6a/group__aruco.html#ga366993d29fdddd995fba8c2e6ca811ea + # So far I have not seen calibration return a number of coefficients other than 5. + # Note too that there is an unchecked expectation that radial distortion be monotonic. + + intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration( + timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), + image_resolution=image_resolution, + calibrated_values=IntrinsicParameters( + focal_length_x_px=float(charuco_camera_matrix[0, 0]), + focal_length_y_px=float(charuco_camera_matrix[1, 1]), + optical_center_x_px=float(charuco_camera_matrix[0, 2]), + optical_center_y_px=float(charuco_camera_matrix[1, 2]), + radial_distortion_coefficients=[ + float(charuco_distortion_coefficients[0, 0]), + float(charuco_distortion_coefficients[1, 0]), + float(charuco_distortion_coefficients[4, 0])], + tangential_distortion_coefficients=[ + float(charuco_distortion_coefficients[2, 0]), + float(charuco_distortion_coefficients[3, 0])]), + supplemental_data=supplemental_data) + + return intrinsic_calibration, used_image_metadata diff --git a/src/main_detector.py b/src/main_detector.py index 032b343..498e7bc 100644 --- a/src/main_detector.py +++ b/src/main_detector.py @@ -1,4 +1,4 @@ -from src.detector.detector_app import app +from src.detector.app import app import logging import uvicorn diff --git a/src/main_pose_solver.py b/src/main_mixer.py similarity 
index 72% rename from src/main_pose_solver.py rename to src/main_mixer.py index 332a9b4..56aa823 100644 --- a/src/main_pose_solver.py +++ b/src/main_mixer.py @@ -1,12 +1,14 @@ +from src.mixer.app import app import logging import uvicorn def main(): uvicorn.run( - "src.pose_solver.pose_solver_app:app", + app, reload=False, port=8000, + host="0.0.0.0", log_level=logging.INFO) diff --git a/src/mixer/__init__.py b/src/mixer/__init__.py new file mode 100644 index 0000000..687e348 --- /dev/null +++ b/src/mixer/__init__.py @@ -0,0 +1,30 @@ +from.mixer import Mixer +from .api import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetActiveRequest, \ + ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ + PoseSolverAddDetectorFrameRequest, \ + PoseSolverAddTargetRequest, \ + PoseSolverAddTargetResponse, \ + PoseSolverGetPosesRequest, \ + PoseSolverGetPosesResponse, \ + PoseSolverSetExtrinsicRequest, \ + PoseSolverSetReferenceRequest, \ + PoseSolverSetTargetsRequest, \ + MixerStartRequest, \ + MixerStopRequest, \ + MixerUpdateIntrinsicParametersRequest diff --git a/src/mixer/api.py b/src/mixer/api.py new file mode 100644 index 0000000..6944078 --- /dev/null +++ b/src/mixer/api.py @@ -0,0 +1,297 @@ +from src.common import \ + DetectorFrame, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrator, \ + IntrinsicParameters, \ + 
Matrix4x4, \ + MCTRequest, \ + MCTResponse, \ + Pose, \ + Target +from pydantic import Field + + +class ExtrinsicCalibrationCalculateRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_calculate" + + parsable_type: str = Field(default=type_identifier()) + + +class ExtrinsicCalibrationCalculateResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_calculate" + + parsable_type: str = Field(default=type_identifier()) + + result_identifier: str = Field() + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationDeleteStagedRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_delete_staged" + + parsable_type: str = Field(default=type_identifier()) + + +class ExtrinsicCalibrationImageAddRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_add" + + parsable_type: str = Field(default=type_identifier()) + + image_base64: str = Field() + detector_label: str = Field() + timestamp_utc_iso8601: str = Field() + + +class ExtrinsicCalibrationImageAddResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_add" + + parsable_type: str = Field(default=type_identifier()) + + image_identifier: str = Field() + + +class ExtrinsicCalibrationImageGetRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_get" + + parsable_type: str = Field(default=type_identifier()) + + image_identifier: str = Field() + + +class ExtrinsicCalibrationImageGetResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_get" + + parsable_type: str = Field(default=type_identifier()) + + image_base64: str = Field() + + +class ExtrinsicCalibrationImageMetadataListRequest(MCTRequest): + @staticmethod 
+ def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_metadata_list" + + parsable_type: str = Field(default=type_identifier()) + + +class ExtrinsicCalibrationImageMetadataListResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_metadata_list" + + parsable_type: str = Field(default=type_identifier()) + + metadata_list: list[ExtrinsicCalibrator.ImageMetadata] = Field(default_factory=list) + + +class ExtrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_image_metadata_update" + + parsable_type: str = Field(default=type_identifier()) + + image_identifier: str = Field() + image_state: ExtrinsicCalibrator.ImageState = Field() + image_label: str | None = Field(default=None) + + +class ExtrinsicCalibrationResultGetRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_get" + + parsable_type: str = Field(default=type_identifier()) + + result_identifier: str = Field() + + +class ExtrinsicCalibrationResultGetResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_get" + + parsable_type: str = Field(default=type_identifier()) + + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationResultGetActiveRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_active_get" + + parsable_type: str = Field(default=type_identifier()) + + +class ExtrinsicCalibrationResultGetActiveResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_active_get" + + parsable_type: str = Field(default=type_identifier()) + + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationResultMetadataListRequest(MCTRequest): + @staticmethod + def 
type_identifier() -> str: + return "mixer_extrinsic_calibration_result_metadata_list" + + parsable_type: str = Field(default=type_identifier()) + + +class ExtrinsicCalibrationResultMetadataListResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_metadata_list" + + parsable_type: str = Field(default=type_identifier()) + + metadata_list: list[ExtrinsicCalibrator.ResultMetadata] = Field(default_factory=list) + + +class ExtrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_extrinsic_calibration_result_metadata_update" + + parsable_type: str = Field(default=type_identifier()) + + result_identifier: str = Field() + result_state: ExtrinsicCalibrator.ResultState = Field() + result_label: str | None = Field(default=None) + + +class PoseSolverAddDetectorFrameRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_add_marker_corners" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + detector_label: str = Field() + detector_frame: DetectorFrame = Field() + + +class PoseSolverAddTargetRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_add_target" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + target: Target = Field() + + +class PoseSolverAddTargetResponse(MCTResponse): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_add_target" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + target_id: str = Field() + + +class PoseSolverGetPosesRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_get_poses" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class PoseSolverGetPosesResponse(MCTResponse): + @staticmethod + def 
type_identifier() -> str: + return "mixer_pose_solver_get_poses" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + detector_poses: list[Pose] + target_poses: list[Pose] + + +class PoseSolverSetExtrinsicRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_set_extrinsic_parameters" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + detector_label: str = Field() + transform_to_reference: Matrix4x4 = Field() + + +class PoseSolverSetReferenceRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_set_reference_marker" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + marker_id: int = Field() + marker_diameter: float = Field() + + +class PoseSolverSetTargetsRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_pose_solver_set_targets" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + targets: list[Target] = Field() + + +class MixerStartRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_start" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class MixerStopRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_stop" + + # noinspection PyTypeHints + parsable_type: str = Field(default=type_identifier()) + + +class MixerUpdateIntrinsicParametersRequest(MCTRequest): + @staticmethod + def type_identifier() -> str: + return "mixer_update_intrinsic_parameters" + + parsable_type: str = Field(default=type_identifier()) + + detector_label: str = Field() + intrinsic_parameters: IntrinsicParameters = Field() diff --git a/src/mixer/app.py b/src/mixer/app.py new file mode 100644 index 0000000..4cf90d9 --- /dev/null +++ b/src/mixer/app.py @@ -0,0 +1,96 @@ +from .api import \ + 
PoseSolverAddDetectorFrameRequest, \ + PoseSolverAddTargetRequest, \ + PoseSolverGetPosesResponse, \ + MixerUpdateIntrinsicParametersRequest +from .mixer import \ + Mixer +from src.common import \ + EmptyResponse, \ + ErrorResponse +import asyncio +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from fastapi.websockets import WebSocket +import hjson +import logging +import os +from typing import Final + + +# Note: This is the only implementation, currently. +from src.implementations.extrinsic_charuco_opencv import CharucoOpenCVExtrinsicCalibrator + + +logger = logging.getLogger(__name__) + +CONFIGURATION_FILEPATH_ENV_VAR: Final[str] = "MCSTRACK_MIXER_CONFIGURATION_FILEPATH" + + +def create_app() -> FastAPI: + configuration_filepath: str = os.path.join( + os.path.dirname(__file__), "..", "..", "data", "configuration", "mixer", "aruco.json") + configuration_filepath = os.getenv(CONFIGURATION_FILEPATH_ENV_VAR, configuration_filepath) + configuration: Mixer.Configuration + with open(configuration_filepath, 'r') as infile: + file_contents: str = infile.read() + configuration_dict = hjson.loads(file_contents) + configuration = Mixer.Configuration(**configuration_dict) + + mixer = Mixer( + configuration=configuration, + extrinsic_calibrator_type=CharucoOpenCVExtrinsicCalibrator) + mixer_app = FastAPI() + + # CORS Middleware + origins = ["http://localhost"] + mixer_app.add_middleware( + CORSMiddleware, + allow_origins=origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"]) + + @mixer_app.post("/add_detector_frame") + async def add_marker_corners( + request: PoseSolverAddDetectorFrameRequest + ) -> EmptyResponse | ErrorResponse: + return mixer.pose_solver_add_detector_frame(request=request) + + @mixer_app.post("/add_target") + async def add_target_marker( + request: PoseSolverAddTargetRequest + ) -> EmptyResponse | ErrorResponse: + return mixer.pose_solver_add_target(request=request) + + @mixer_app.get("/get_poses") 
+ async def get_poses() -> PoseSolverGetPosesResponse | ErrorResponse: + return mixer.pose_solver_get_poses() + + @mixer_app.post("/set_intrinsic_parameters") + async def set_intrinsic_parameters( + request: MixerUpdateIntrinsicParametersRequest + ) -> EmptyResponse | ErrorResponse: + return mixer.mixer_update_intrinsic_parameters(request=request) + + @mixer_app.head("/start") + async def start() -> None: + mixer.mixer_start() + + @mixer_app.head("/stop") + async def stop() -> None: + mixer.mixer_stop() + + @mixer_app.websocket("/websocket") + async def websocket_handler(websocket: WebSocket) -> None: + await mixer.websocket_handler(websocket=websocket) + + @mixer_app.on_event("startup") + async def internal_update() -> None: + await mixer.update() + asyncio.create_task(internal_update()) + + return mixer_app + + +app: FastAPI = create_app() diff --git a/src/mixer/mixer.py b/src/mixer/mixer.py new file mode 100644 index 0000000..e6ae863 --- /dev/null +++ b/src/mixer/mixer.py @@ -0,0 +1,369 @@ +from .api import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetActiveRequest, \ + ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ + PoseSolverAddDetectorFrameRequest, \ + PoseSolverAddTargetRequest, \ + PoseSolverGetPosesRequest, \ + PoseSolverGetPosesResponse, \ + PoseSolverSetExtrinsicRequest, \ + 
MixerUpdateIntrinsicParametersRequest, \ + PoseSolverSetReferenceRequest, \ + PoseSolverSetTargetsRequest, \ + MixerStartRequest, \ + MixerStopRequest +from src.common import \ + EmptyResponse, \ + ErrorResponse, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrator, \ + MCTCalibrationError, \ + MCTComponent, \ + MCTRequest, \ + MCTResponse, \ + Pose, \ + PoseSolver, \ + PoseSolverException +from enum import StrEnum +import logging +from pydantic import BaseModel, Field +from typing import Callable, Final + + +logger = logging.getLogger(__name__) + + +_ROLE_LABEL: Final[str] = "mixer" + + +class _ConfigurationSection(BaseModel): + implementation: str = Field() + configuration: dict = Field() + + +# noinspection DuplicatedCode +class Mixer(MCTComponent): + + class Configuration(BaseModel): + mixer_label: str = Field() + extrinsic_calibrator: _ConfigurationSection = Field() + + class Status(StrEnum): + STOPPED = "stopped" + RUNNING = "running" + FAILURE = "failure" + + _status: Status + + _configuration: Configuration + _pose_solver: PoseSolver + _extrinsic_calibrator: ExtrinsicCalibrator + + def __init__( + self, + configuration: Configuration, + extrinsic_calibrator_type: type[ExtrinsicCalibrator] + ): + super().__init__( + status_source_label=configuration.mixer_label, + send_status_messages_to_logger=True) + + self._configuration = configuration + self._pose_solver = PoseSolver() + self._extrinsic_calibrator = extrinsic_calibrator_type( + configuration=extrinsic_calibrator_type.Configuration( + **self._configuration.extrinsic_calibrator.configuration)) + + self._status = Mixer.Status.STOPPED + + def extrinsic_calibrator_calculate( + self, + **_kwargs + ) -> ExtrinsicCalibrationCalculateResponse | ErrorResponse: + result_identifier: str + extrinsic_calibration: ExtrinsicCalibration + try: + result_identifier, extrinsic_calibration = self._extrinsic_calibrator.calculate() + except MCTCalibrationError as e: + logger.error(e.private_message) + return 
ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationCalculateResponse( + result_identifier=result_identifier, + extrinsic_calibration=extrinsic_calibration) + + def extrinsic_calibrator_delete_staged( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: + try: + self._extrinsic_calibrator.delete_staged() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + def extrinsic_calibrator_image_add( + self, + **kwargs + ) -> ExtrinsicCalibrationImageAddResponse | ErrorResponse: + request: ExtrinsicCalibrationImageAddRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageAddRequest) + image_identifier: str + try: + image_identifier = self._extrinsic_calibrator.add_image( + image_base64=request.image_base64, + detector_label=request.detector_label, + timestamp_utc_iso8601=request.timestamp_utc_iso8601) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageAddResponse(image_identifier=image_identifier) + + def extrinsic_calibrator_image_get( + self, + **kwargs + ) -> ExtrinsicCalibrationImageGetResponse | ErrorResponse: + request: ExtrinsicCalibrationImageGetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageGetRequest) + image_base64: str + try: + image_base64 = self._extrinsic_calibrator.get_image_by_identifier(identifier=request.image_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageGetResponse(image_base64=image_base64) + + def extrinsic_calibrator_image_metadata_list( + self, + **_kwargs + ) -> ExtrinsicCalibrationImageMetadataListResponse | ErrorResponse: + image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] + try: + image_metadata_list = 
self._extrinsic_calibrator.list_image_metadata() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageMetadataListResponse(metadata_list=image_metadata_list) + + def extrinsic_calibrator_image_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: ExtrinsicCalibrationImageMetadataUpdateRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageMetadataUpdateRequest) + try: + self._extrinsic_calibrator.update_image_metadata( + image_identifier=request.image_identifier, + image_state=request.image_state, + image_label=request.image_label) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + def extrinsic_calibrator_result_get_active( + self, + **_kwargs + ) -> ExtrinsicCalibrationResultGetActiveResponse | ErrorResponse: + calibration: ExtrinsicCalibration + try: + calibration = self._extrinsic_calibrator.get_result_active() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultGetActiveResponse(extrinsic_calibration=calibration) + + def extrinsic_calibrator_result_get( + self, + **kwargs + ) -> ExtrinsicCalibrationResultGetResponse | ErrorResponse: + request: ExtrinsicCalibrationResultGetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationResultGetRequest) + calibration: ExtrinsicCalibration + try: + calibration = self._extrinsic_calibrator.get_result(result_identifier=request.result_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultGetResponse(extrinsic_calibration=calibration) + + def extrinsic_calibrator_result_metadata_list( + self, + **_kwargs + ) -> 
ExtrinsicCalibrationResultMetadataListResponse | ErrorResponse: + result_metadata_list: list[ExtrinsicCalibrator.ResultMetadata] + try: + result_metadata_list = self._extrinsic_calibrator.list_result_metadata() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultMetadataListResponse(metadata_list=result_metadata_list) + + def extrinsic_calibrator_result_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: ExtrinsicCalibrationResultMetadataUpdateRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationResultMetadataUpdateRequest) + try: + self._extrinsic_calibrator.update_result_metadata( + result_identifier=request.result_identifier, + result_state=request.result_state, + result_label=request.result_label) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + @staticmethod + def get_role_label(): + return _ROLE_LABEL + + def mixer_start(self, **_kwargs) -> EmptyResponse: + self._status = Mixer.Status.RUNNING + return EmptyResponse() + + def mixer_stop(self, **_kwargs) -> EmptyResponse: + self._status = Mixer.Status.STOPPED + return EmptyResponse() + + def mixer_update_intrinsic_parameters( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: MixerUpdateIntrinsicParametersRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=MixerUpdateIntrinsicParametersRequest) + self._pose_solver.set_intrinsic_parameters( + detector_label=request.detector_label, + intrinsic_parameters=request.intrinsic_parameters) + self._extrinsic_calibrator.intrinsic_parameters_update( + detector_label=request.detector_label, + intrinsic_parameters=request.intrinsic_parameters) + return EmptyResponse() + + def pose_solver_add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: 
PoseSolverAddDetectorFrameRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverAddDetectorFrameRequest) + try: + self._pose_solver.add_detector_frame( + detector_label=request.detector_label, + frame_annotations=request.detector_frame.annotations, + frame_timestamp_utc=request.detector_frame.timestamp_utc) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverAddTargetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverAddTargetRequest) + try: + self._pose_solver.add_target(target=request.target) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: + detector_poses: list[Pose] + target_poses: list[Pose] + try: + detector_poses, target_poses = self._pose_solver.get_poses() + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return PoseSolverGetPosesResponse( + detector_poses=detector_poses, + target_poses=target_poses) + + def pose_solver_set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetExtrinsicRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetExtrinsicRequest) + try: + self._pose_solver.set_extrinsic_matrix( + detector_label=request.detector_label, + transform_to_reference=request.transform_to_reference) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetReferenceRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetReferenceRequest) + try: + self._pose_solver.set_reference_target(target_id=str(request.marker_id)) + except 
PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetTargetsRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetTargetsRequest) + try: + self._pose_solver.set_targets(targets=request.targets) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_methods() + return_value.update({ + ExtrinsicCalibrationCalculateRequest: self.extrinsic_calibrator_calculate, + ExtrinsicCalibrationDeleteStagedRequest: self.extrinsic_calibrator_delete_staged, + ExtrinsicCalibrationImageAddRequest: self.extrinsic_calibrator_image_add, + ExtrinsicCalibrationImageGetRequest: self.extrinsic_calibrator_image_get, + ExtrinsicCalibrationImageMetadataListRequest: self.extrinsic_calibrator_image_metadata_list, + ExtrinsicCalibrationImageMetadataUpdateRequest: self.extrinsic_calibrator_image_metadata_update, + ExtrinsicCalibrationResultGetActiveRequest: self.extrinsic_calibrator_result_get_active, + ExtrinsicCalibrationResultGetRequest: self.extrinsic_calibrator_result_get, + ExtrinsicCalibrationResultMetadataListRequest: self.extrinsic_calibrator_result_metadata_list, + ExtrinsicCalibrationResultMetadataUpdateRequest: self.extrinsic_calibrator_result_metadata_update, + MixerStartRequest: self.mixer_start, + MixerStopRequest: self.mixer_stop, + MixerUpdateIntrinsicParametersRequest: self.mixer_update_intrinsic_parameters, + PoseSolverAddDetectorFrameRequest: self.pose_solver_add_detector_frame, + PoseSolverAddTargetRequest: self.pose_solver_add_target, + PoseSolverGetPosesRequest: self.pose_solver_get_poses, + PoseSolverSetExtrinsicRequest: self.pose_solver_set_extrinsic_matrix, + 
PoseSolverSetReferenceRequest: self.pose_solver_set_reference_marker, + PoseSolverSetTargetsRequest: self.pose_solver_set_targets}) + return return_value + + async def update(self): + if self.time_sync_active: + return + if self._status == Mixer.Status.RUNNING: + self._pose_solver.update() diff --git a/src/pose_solver/api.py b/src/pose_solver/api.py deleted file mode 100644 index a2c6081..0000000 --- a/src/pose_solver/api.py +++ /dev/null @@ -1,167 +0,0 @@ -from src.common import \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ - DetectorFrame, \ - IntrinsicParameters, \ - Matrix4x4, \ - Pose, \ - TargetBoard, \ - TargetMarker -from pydantic import Field -from typing import Final, Literal, Union - - -class PoseSolverAddDetectorFrameRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverAddDetectorFrameRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - detector_frame: DetectorFrame = Field() - - -class PoseSolverAddTargetMarkerRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_target_marker" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverAddTargetMarkerRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target: TargetMarker = Field() - - -class PoseSolverAddTargetBoardRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_target_board" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverAddTargetBoardRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target: TargetBoard = Field() - - -class PoseSolverAddTargetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" - - 
@staticmethod - def parsable_type_identifier() -> str: - return PoseSolverAddTargetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target_id: str = Field() - - -class PoseSolverGetPosesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "get_poses" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverGetPosesRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class PoseSolverGetPosesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "get_poses" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverGetPosesResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_poses: list[Pose] - target_poses: list[Pose] - - -class PoseSolverSetExtrinsicRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_extrinsic_parameters" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverSetExtrinsicRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - transform_to_reference: Matrix4x4 = Field() - - -class PoseSolverSetIntrinsicRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_intrinsic_parameters" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverSetIntrinsicRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - intrinsic_parameters: IntrinsicParameters = Field() - - -class PoseSolverSetReferenceRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_reference_marker" - - @staticmethod - def parsable_type_identifier() -> str: - return 
PoseSolverSetReferenceRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - marker_id: int = Field() - marker_diameter: float = Field() - - -class PoseSolverSetTargetsRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_targets" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverSetTargetsRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - targets: list[Union[TargetMarker, TargetBoard]] = Field() - - -class PoseSolverStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "start_pose_solver" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverStartRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class PoseSolverStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "stop_pose_solver" - - @staticmethod - def parsable_type_identifier() -> str: - return PoseSolverStopRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/pose_solver/exceptions.py b/src/pose_solver/exceptions.py deleted file mode 100644 index f921c5a..0000000 --- a/src/pose_solver/exceptions.py +++ /dev/null @@ -1,5 +0,0 @@ -class PoseSolverException(Exception): - message: str - - def __init__(self, message: str, *args, **kwargs): - self.message = message diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py deleted file mode 100644 index 4635b7d..0000000 --- a/src/pose_solver/pose_solver.py +++ /dev/null @@ -1,486 +0,0 @@ -from .exceptions import \ - PoseSolverException -from .structures import \ - DetectorRecord, \ - DetectorFrameRecord, \ - Ray, \ - PoseSolverParameters -from .util import \ - average_quaternion, \ - average_vector, \ - 
closest_intersection_between_n_lines, \ - IterativeClosestPointParameters, \ - iterative_closest_point_for_points_and_rays -from src.common.structures import \ - DetectorFrame, \ - IntrinsicParameters, \ - Matrix4x4, \ - Pose, \ - TargetBase -from src.common.util import \ - MathUtils, \ - register_corresponding_points -import cv2 -import cv2.aruco -import datetime -import itertools -import numpy -from scipy.spatial.transform import Rotation -from typing import Final, TypeVar - - -EPSILON: Final[float] = 0.0001 -_CORNER_COUNT: Final[int] = 4 - -KeyType = TypeVar("KeyType") -ValueType = TypeVar("ValueType") - - -class PoseSolver: - """ - Class containing the actual "solver" logic, kept separate from the API. - """ - - # bookkeeping - _last_change_timestamp_utc: datetime.datetime - _last_updated_timestamp_utc: datetime.datetime - - # inputs - _parameters: PoseSolverParameters - _intrinsics_by_detector_label: dict[str, IntrinsicParameters] - _extrinsics_by_detector_label: dict[str, Matrix4x4] - _targets: list[TargetBase] # First target is considered the "reference" - # input per frame - _detector_records_by_detector_label: dict[str, DetectorRecord] - - # internal threshold - _minimum_marker_age_before_removal_seconds: float - # use this to make sure each marker is associated uniquely to a single target - _marker_target_map: dict[str, TargetBase] # Each marker shall be used at most once by a single target - - # outputs - _poses_by_target_id: dict[str, Matrix4x4] - _poses_by_detector_label: dict[str, Matrix4x4] - - def __init__( - self - ): - self._last_change_timestamp_utc = datetime.datetime.min - self._last_updated_timestamp_utc = datetime.datetime.min - - self._parameters = PoseSolverParameters() - self._intrinsics_by_detector_label = dict() - self._extrinsics_by_detector_label = dict() - self._targets = list() - self._detector_records_by_detector_label = dict() - - self._minimum_marker_age_before_removal_seconds = max([ - 
self._parameters.POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) - self._marker_target_map = dict() - - self._poses_by_target_id = dict() - self._poses_by_detector_label = dict() - - def add_detector_frame( - self, - detector_label: str, - detector_frame: DetectorFrame - ) -> None: - detector_frame_record: DetectorFrameRecord = DetectorFrameRecord( - detector_label=detector_label, - frame=detector_frame) - if detector_label not in self._detector_records_by_detector_label: - self._detector_records_by_detector_label[detector_label] = DetectorRecord() - self._detector_records_by_detector_label[detector_label].clear_frame_records() - self._detector_records_by_detector_label[detector_label].add_frame_record(detector_frame_record) - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def add_target( - self, - target: TargetBase - ) -> None: - for existing_target in self._targets: - if target.target_id == existing_target.target_id: - raise PoseSolverException( - f"Target with name {target.target_id} is already registered. 
" - f"Please use a different name, and also make sure you are not adding the same target twice.") - marker_ids = target.get_marker_ids() - for marker_id in marker_ids: - if marker_id in self._marker_target_map: - target_id: str = self._marker_target_map[marker_id].target_id - raise PoseSolverException( - f"Marker {marker_id} is already used with target {target_id} and it cannot be reused.") - target_index = len(self._targets) - self._targets.append(target) - for marker_id in marker_ids: - self._marker_target_map[marker_id] = self._targets[target_index] - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def clear_extrinsic_matrices(self): - self._extrinsics_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def clear_intrinsic_parameters(self): - self._intrinsics_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def clear_targets(self): - self._targets.clear() - self._marker_target_map.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def get_poses( - self - ) -> tuple[list[Pose], list[Pose]]: - """ - Returns detector_poses, target_poses - """ - detector_poses: list[Pose] = [ - Pose( - target_id=detector_label, - object_to_reference_matrix=pose, - solver_timestamp_utc_iso8601=self._last_updated_timestamp_utc.isoformat()) - for detector_label, pose in self._poses_by_detector_label.items()] - target_poses: list[Pose] = [ - Pose( - target_id=str(target_id), - object_to_reference_matrix=pose, - solver_timestamp_utc_iso8601=self._last_updated_timestamp_utc.isoformat()) - for target_id, pose in self._poses_by_target_id.items()] - return detector_poses, target_poses - - def list_targets(self) -> list[TargetBase]: - return self._targets - - def set_extrinsic_matrix( - self, - detector_label: str, - transform_to_reference: Matrix4x4 - ) -> None: - self._extrinsics_by_detector_label[detector_label] = transform_to_reference - 
self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def set_intrinsic_parameters( - self, - detector_label: str, - intrinsic_parameters: IntrinsicParameters - ) -> None: - self._intrinsics_by_detector_label[detector_label] = intrinsic_parameters - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def set_reference_target( - self, - target_id: str - ) -> None: - found: bool = False - for target_index, target in enumerate(self._targets): - if target.target_id == target_id: - self._targets[0], self._targets[target_index] = self._targets[target_index], self._targets[0] - self._last_change_timestamp_utc = datetime.datetime.utcnow() - found = True - break - if not found: - raise PoseSolverException(f"{target_id} was not found.") - - def set_targets( - self, - targets: list[TargetBase] - ) -> None: - self._targets = targets - self._poses_by_target_id.clear() - self._poses_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() - - def _calculate_reprojection_error_for_pose( - self, - ray_set: ..., - object_points_target: list[list[float]], - object_to_reference_translation: list[float], - object_to_reference_rotation_quaternion: list[float] - ) -> float: - object_to_reference_matrix = numpy.identity(4, dtype="float32") - object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(object_to_reference_rotation_quaternion).as_matrix() - object_to_reference_matrix[0:3, 3] = object_to_reference_translation - object_points_reference = numpy.empty((len(object_points_target), 3), dtype="float32") - for object_point_index, object_point_target in enumerate(object_points_target): - object_point_reference = numpy.matmul( - object_to_reference_matrix, - [object_point_target[0], object_point_target[1], object_point_target[2], 1.0]) - object_points_reference[object_point_index, 0:3] = object_point_reference[0:3] - detector_label: str = ray_set.detector_label - reference_to_detector_matrix: Matrix4x4 = 
ray_set.detector_to_reference_matrix - reference_to_detector: numpy.ndarray = reference_to_detector_matrix.as_numpy_array() - reference_to_detector_rotation_vector = \ - Rotation.as_rotvec(Rotation.from_matrix(reference_to_detector[0:3, 0:3])) - reference_to_detector_translation = reference_to_detector[0:3, 3] - camera_matrix = numpy.array(self._intrinsics_by_detector_label[detector_label].get_matrix(), dtype="float32") - camera_distortion_coefficients = \ - numpy.array(self._intrinsics_by_detector_label[detector_label].get_distortion_coefficients(), - dtype="float32") - project_points_result = cv2.projectPoints( - objectPoints=object_points_reference, - rvec=reference_to_detector_rotation_vector, - tvec=reference_to_detector_translation, - cameraMatrix=camera_matrix, - distCoeffs=camera_distortion_coefficients) - projected_points = project_points_result[0] - sum_reprojection_errors_squared: float = 0.0 - for point_index, image_point in enumerate(ray_set.image_points): - reprojection_error_for_point = \ - numpy.linalg.norm(projected_points[point_index, 0, 0:2] - image_point) - sum_reprojection_errors_squared += reprojection_error_for_point ** 2 - mean_reprojection_errors_squared: float = sum_reprojection_errors_squared / len(object_points_target) - rms_reprojection_error = numpy.sqrt(mean_reprojection_errors_squared) - return rms_reprojection_error - - @staticmethod - def _denoise_is_pose_pair_outlier( - pose_a_object_to_reference_matrix: numpy.ndarray, - pose_b_object_to_reference_matrix: numpy.ndarray, - parameters: PoseSolverParameters - ) -> bool: - - position_a = pose_a_object_to_reference_matrix[0:3, 3] - position_b = pose_b_object_to_reference_matrix[0:3, 3] - distance_millimeters = numpy.linalg.norm(position_a - position_b) - if distance_millimeters > parameters.DENOISE_OUTLIER_DISTANCE_MILLIMETERS: - return True - - orientation_a = pose_a_object_to_reference_matrix[0:3, 0:3] - orientation_b = pose_b_object_to_reference_matrix[0:3, 0:3] - rotation_a_to_b = 
numpy.matmul(orientation_a, numpy.linalg.inv(orientation_b)) - angle_degrees = numpy.linalg.norm(Rotation.from_matrix(rotation_a_to_b).as_rotvec()) - if angle_degrees > parameters.DENOISE_OUTLIER_DISTANCE_MILLIMETERS: - return True - - return False - - # Take an average of recent poses, with detection and removal of outliers - # TODO: Currently not used - but it's probably a good feature to have - def _denoise_detector_to_reference_pose( - self, - object_label: str, - raw_poses: list[...] # In order, oldest to newest - ) -> ...: - most_recent_pose = raw_poses[-1] - max_storage_size: int = self._parameters.DENOISE_STORAGE_SIZE - filter_size: int = self._parameters.DENOISE_FILTER_SIZE - if filter_size <= 1 or max_storage_size <= 1: - return most_recent_pose # trivial case - - # find a consistent range of recent indices - poses: list[...] = list(raw_poses) - poses.reverse() # now they are sorted so that the first element is most recent - required_starting_streak: int = self._parameters.DENOISE_REQUIRED_STARTING_STREAK - starting_index: int = -1 # not yet known, we want to find this - if required_starting_streak <= 1: - starting_index = 0 # trivial case - else: - current_inlier_streak: int = 1 # comparison always involves a streak of size 1 or more - for pose_index in range(1, len(raw_poses)): - if self._denoise_is_pose_pair_outlier(poses[pose_index - 1], poses[pose_index]): - current_inlier_streak = 1 - continue - current_inlier_streak += 1 - if current_inlier_streak >= required_starting_streak: - starting_index = pose_index - required_starting_streak + 1 - break - - if starting_index < 0: - if len(poses) >= max_storage_size: # There appear to be enough poses, so data seems to be inconsistent - print( - "Warning: Can't find consistent pose streak. " - "Will use most recent raw pose for object " + object_label + ".") - return most_recent_pose - - poses_to_average: list[...] 
= [poses[starting_index]] - for pose_index in range(starting_index + 1, len(poses)): - if (not self._denoise_is_pose_pair_outlier(poses[pose_index - 1], poses[pose_index])) or \ - (not self._denoise_is_pose_pair_outlier(poses[starting_index], poses[pose_index])): - poses_to_average.append(poses[pose_index]) - if len(poses_to_average) > filter_size: - break - - translations = [list(pose.object_to_reference_matrix[0:3, 3]) - for pose in poses_to_average] - orientations = [list(Rotation.from_matrix(pose.object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) - for pose in poses_to_average] - filtered_translation = average_vector(translations) - filtered_orientation = average_quaternion(orientations) - filtered_object_to_reference_matrix = numpy.identity(4, dtype="float32") - filtered_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(filtered_orientation).as_matrix() - filtered_object_to_reference_matrix[0:3, 3] = filtered_translation - # return PoseData( - # object_label=object_label, - # object_to_reference_matrix=filtered_object_to_reference_matrix, - # ray_sets=most_recent_pose.ray_sets) - - def update(self) -> None: - if self._last_updated_timestamp_utc >= self._last_change_timestamp_utc: - return - - if len(self._targets) == 0: - return - - self._last_updated_timestamp_utc = datetime.datetime.utcnow() - self._poses_by_detector_label.clear() - self._poses_by_target_id.clear() - - corners: dict[str, dict[str, list[list[float]]]] # [detector_label][marker_id][point_index][x/y] - corners = { - detector_label: detector_record.get_corners() - for detector_label, detector_record in self._detector_records_by_detector_label.items()} - detector_labels: list[str] = list(self._detector_records_by_detector_label.keys()) - - for detector_label in detector_labels: - if detector_label not in self._intrinsics_by_detector_label: - # TODO: Output a suitable warning that no intrinsics have been received, but don't do it every frame - del corners[detector_label] - - 
reference_target: TargetBase = self._targets[0] - for detector_label in detector_labels: - if detector_label in self._extrinsics_by_detector_label: - self._poses_by_detector_label[detector_label] = self._extrinsics_by_detector_label[detector_label] - else: - intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - reference_to_detector: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( - target=reference_target, - corners_by_marker_id=corners[detector_label], - detector_intrinsics=intrinsics) - detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( - numpy.linalg.inv(reference_to_detector.as_numpy_array())) - self._poses_by_detector_label[detector_label] = detector_to_reference - - # At the time of writing, each marker_id can be used only once. - # So we can remove marker_ids used by the reference_target to avoid unnecessary processing. - for detector_label in detector_labels: - for marker_id in reference_target.get_marker_ids(): - corners[detector_label].pop(marker_id) - - rays: dict[str, list[list[Ray]]] = dict() # indexed as [marker_id][detector_index][corner_index] - detector_labels_by_marker_id: dict[str, list[str]] = dict() - for detector_label in detector_labels: - detector_to_reference: Matrix4x4 = self._poses_by_detector_label[detector_label] - intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - ray_origin: list[float] = detector_to_reference.get_translation() - ray_directions_by_marker_id: dict[str, list[list[float]]] # [marker_id][point_index][x/y/z] - ray_directions_by_marker_id = MathUtils.convert_detector_corners_to_vectors( - corners_by_marker_id=corners[detector_label], - detector_intrinsics=intrinsics, - detector_to_reference_matrix=detector_to_reference) - for marker_id, ray_directions in ray_directions_by_marker_id.items(): - if marker_id not in rays: - rays[marker_id] = list() - rays[marker_id].append([ - Ray(source_point=ray_origin, 
direction=ray_directions_by_marker_id[marker_id][corner_index]) - for corner_index in range(0, _CORNER_COUNT)]) - if marker_id not in detector_labels_by_marker_id: - detector_labels_by_marker_id[marker_id] = list() - detector_labels_by_marker_id[marker_id].append(detector_label) - - # intersect rays to find the 3D points for each marker corner in reference coordinates - intersections_by_marker_id: dict[str, list[list[float]]] = dict() # [marker_id][corner_index][x/y/z] - standalone_rays_marker_ids: list[str] = list() - for marker_id, rays_by_detector_index in rays.items(): - ray_list_by_corner_index: list[list[Ray]] = [[ - rays[marker_id][detector_index][corner_index] - for detector_index in range(0, len(rays[marker_id]))] - for corner_index in range(0, _CORNER_COUNT)] - intersections_appear_valid: bool = True # If something looks off, set this to False - corners_reference_by_corner_index: list[list[float]] = list() - for corner_index in range(0, _CORNER_COUNT): - intersection_result = closest_intersection_between_n_lines( - rays=ray_list_by_corner_index[corner_index], - maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) - if intersection_result.centroids.shape[0] == 0: - intersections_appear_valid = False - break - corners_reference_by_corner_index.append(list(intersection_result.centroid().flatten())) - if not intersections_appear_valid: - standalone_rays_marker_ids.append(marker_id) - continue - intersections_by_marker_id[marker_id] = corners_reference_by_corner_index - - # We estimate the pose of each target based on the calculated intersections - # and the rays projected from each detector - for target in self._targets: - if target.target_id == str(reference_target.target_id): - continue # everything is expressed relative to the reference... 
- detected_marker_ids_in_target: list[str] = target.get_marker_ids() - - marker_ids_with_intersections: list[str] = list() - marker_ids_with_rays: list[str] = list() - detector_labels: set[str] = set() - for marker_id in detected_marker_ids_in_target: - if marker_id in intersections_by_marker_id: - marker_ids_with_intersections.append(marker_id) - if marker_id in standalone_rays_marker_ids: - marker_ids_with_rays.append(marker_id) - if marker_id in detector_labels_by_marker_id: - for detector_label in detector_labels_by_marker_id[marker_id]: - detector_labels.add(detector_label) - - if len(marker_ids_with_intersections) <= 0 and len(marker_ids_with_rays) <= 0: - continue # No information on which to base a pose - - if len(detector_labels) < self._parameters.minimum_detector_count: - continue - - # NB. len() == 0 or less for either of these indicates an internal error - one_detector_only: bool = (len(detector_labels) == 1) - if one_detector_only: - # Note: there cannot be any intersections in this case - detector_label: str = next(iter(detector_labels)) - intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - detected_to_detector_matrix4x4: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( - target=target, - corners_by_marker_id=corners[detector_label], - detector_intrinsics=intrinsics) - detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() - detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() - detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector - self._poses_by_target_id[target.target_id] = Matrix4x4.from_numpy_array(detected_to_reference) - else: - # Fill in the required variables for the customized iterative closest point - detected_known_points: list[list[float]] = list(itertools.chain.from_iterable([ - target.get_points_for_marker_id(marker_id) - for marker_id in marker_ids_with_intersections])) - 
reference_known_points: list[list[float]] = list(itertools.chain.from_iterable([ - intersections_by_marker_id[marker_id] - for marker_id in marker_ids_with_intersections])) - detected_ray_points: list[list[float]] = list(itertools.chain.from_iterable([ - target.get_points_for_marker_id(marker_id) - for marker_id in marker_ids_with_rays])) - reference_rays: list[Ray] = list(itertools.chain.from_iterable([ - rays[marker_id] for marker_id in marker_ids_with_rays])) - iterative_closest_point_parameters = IterativeClosestPointParameters( - termination_iteration_count=self._parameters.icp_termination_iteration_count, - termination_delta_translation=self._parameters.icp_termination_translation, - termination_delta_rotation_radians=self._parameters.icp_termination_rotation_radians, - termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, - termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) - if len(marker_ids_with_intersections) >= 1: - initial_detected_to_reference_matrix = register_corresponding_points( - point_set_from=detected_known_points, - point_set_to=reference_known_points, - collinearity_do_check=False) - icp_output = iterative_closest_point_for_points_and_rays( - source_known_points=detected_known_points, - target_known_points=reference_known_points, - source_ray_points=detected_ray_points, - target_rays=reference_rays, - initial_transformation_matrix=initial_detected_to_reference_matrix, - parameters=iterative_closest_point_parameters) - else: - icp_output = iterative_closest_point_for_points_and_rays( - source_known_points=detected_known_points, - target_known_points=reference_known_points, - source_ray_points=detected_ray_points, - target_rays=reference_rays, - parameters=iterative_closest_point_parameters) - detected_to_reference = icp_output.source_to_target_matrix - self._poses_by_target_id[target.target_id] = Matrix4x4.from_numpy_array(detected_to_reference) diff --git 
a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py deleted file mode 100644 index 2886601..0000000 --- a/src/pose_solver/pose_solver_api.py +++ /dev/null @@ -1,160 +0,0 @@ -from .api import \ - PoseSolverAddDetectorFrameRequest, \ - PoseSolverAddTargetMarkerRequest, \ - PoseSolverGetPosesRequest, \ - PoseSolverGetPosesResponse, \ - PoseSolverSetExtrinsicRequest, \ - PoseSolverSetIntrinsicRequest, \ - PoseSolverSetReferenceRequest, \ - PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, \ - PoseSolverStopRequest -from .exceptions import PoseSolverException -from .pose_solver import PoseSolver -from .structures import \ - PoseSolverConfiguration -from src.common import \ - EmptyResponse, \ - ErrorResponse, \ - get_kwarg, \ - MCTComponent, \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ - Pose, \ - PoseSolverStatus -import logging -from typing import Callable - - -logger = logging.getLogger(__name__) - - -class PoseSolverAPI(MCTComponent): - """ - API-friendly layer overtop of a PoseSolver - """ - _status: PoseSolverStatus - _pose_solver: PoseSolver - - def __init__( - self, - configuration: PoseSolverConfiguration, - pose_solver: PoseSolver - ): - super().__init__( - status_source_label=configuration.serial_identifier, - send_status_messages_to_logger=True) - self._pose_solver = pose_solver - self._status = PoseSolverStatus() - - def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddDetectorFrameRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverAddDetectorFrameRequest) - try: - self._pose_solver.add_detector_frame( - detector_label=request.detector_label, - detector_frame=request.detector_frame) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddTargetMarkerRequest = get_kwarg( - kwargs=kwargs, - 
key="request", - arg_type=PoseSolverAddTargetMarkerRequest) - try: - self._pose_solver.add_target(target=request.target) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: - detector_poses: list[Pose] - target_poses: list[Pose] - try: - detector_poses, target_poses = self._pose_solver.get_poses() - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return PoseSolverGetPosesResponse( - detector_poses=detector_poses, - target_poses=target_poses) - - def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetExtrinsicRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetExtrinsicRequest) - try: - self._pose_solver.set_extrinsic_matrix( - detector_label=request.detector_label, - transform_to_reference=request.transform_to_reference) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetIntrinsicRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetIntrinsicRequest) - try: - self._pose_solver.set_intrinsic_parameters( - detector_label=request.detector_label, - intrinsic_parameters=request.intrinsic_parameters) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetReferenceRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetReferenceRequest) - try: - self._pose_solver.set_reference_target(target_id=str(request.marker_id)) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: - 
request: PoseSolverSetTargetsRequest = get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetTargetsRequest) - try: - self._pose_solver.set_targets(targets=request.targets) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def start_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverStatus.Solve.RUNNING - return EmptyResponse() - - def stop_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverStatus.Solve.STOPPED - return EmptyResponse() - - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() - return_value.update({ - PoseSolverAddDetectorFrameRequest: self.add_detector_frame, - PoseSolverAddTargetMarkerRequest: self.add_target, - PoseSolverGetPosesRequest: self.get_poses, - PoseSolverSetExtrinsicRequest: self.set_extrinsic_matrix, - PoseSolverSetIntrinsicRequest: self.set_intrinsic_parameters, - PoseSolverSetReferenceRequest: self.set_reference_marker, - PoseSolverSetTargetsRequest: self.set_targets, - PoseSolverStartRequest: self.start_pose_solver, - PoseSolverStopRequest: self.stop_pose_solver}) - return return_value - - async def update(self): - if self.time_sync_active: - return - if self._status.solve_status == PoseSolverStatus.Solve.RUNNING: - self._pose_solver.update() diff --git a/src/pose_solver/pose_solver_app.py b/src/pose_solver/pose_solver_app.py deleted file mode 100644 index d4009b2..0000000 --- a/src/pose_solver/pose_solver_app.py +++ /dev/null @@ -1,92 +0,0 @@ -from .api import \ - PoseSolverAddDetectorFrameRequest, \ - PoseSolverAddTargetMarkerRequest, \ - PoseSolverGetPosesResponse, \ - PoseSolverSetIntrinsicRequest -from .pose_solver import \ - PoseSolver -from .pose_solver_api import \ - PoseSolverAPI -from .structures import \ - PoseSolverConfiguration -from 
src.common import \ - EmptyResponse, \ - ErrorResponse -import asyncio -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from fastapi.websockets import WebSocket -import hjson -import logging -import os - - -logger = logging.getLogger(__name__) -pose_solver = PoseSolver() - - -def create_app() -> FastAPI: - configuration_filepath: str = os.path.join( - os.path.dirname(__file__), "..", "..", "data", "pose_solver_config.json") - configuration: PoseSolverConfiguration - with open(configuration_filepath, 'r') as infile: - file_contents: str = infile.read() - configuration_dict = hjson.loads(file_contents) - configuration = PoseSolverConfiguration(**configuration_dict) - pose_solver_api = PoseSolverAPI( - configuration=configuration, - pose_solver=pose_solver) - pose_solver_app = FastAPI() - - # CORS Middleware - origins = ["http://localhost"] - pose_solver_app.add_middleware( - CORSMiddleware, - allow_origins=origins, - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"]) - - @pose_solver_app.post("/add_detector_frame") - async def add_marker_corners( - request: PoseSolverAddDetectorFrameRequest - ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.add_detector_frame(request=request) - - @pose_solver_app.post("/add_target") - async def add_target_marker( - request: PoseSolverAddTargetMarkerRequest - ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.add_target(request=request) - - @pose_solver_app.get("/get_poses") - async def get_poses() -> PoseSolverGetPosesResponse | ErrorResponse: - return pose_solver_api.get_poses() - - @pose_solver_app.post("/set_intrinsic_parameters") - async def set_intrinsic_parameters( - request: PoseSolverSetIntrinsicRequest - ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.set_intrinsic_parameters(request=request) - - @pose_solver_app.head("/start_capture") - async def start_capture() -> None: - pose_solver_api.start_pose_solver() - - 
@pose_solver_app.head("/stop_capture") - async def stop_capture() -> None: - pose_solver_api.stop_pose_solver() - - @pose_solver_app.websocket("/websocket") - async def websocket_handler(websocket: WebSocket) -> None: - await pose_solver_api.websocket_handler(websocket=websocket) - - @pose_solver_app.on_event("startup") - async def internal_update() -> None: - await pose_solver_api.update() - asyncio.create_task(internal_update()) - - return pose_solver_app - - -app: FastAPI = create_app() diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py deleted file mode 100644 index 9004252..0000000 --- a/src/pose_solver/structures.py +++ /dev/null @@ -1,169 +0,0 @@ -from src.common.structures import \ - DetectorFrame, \ - Matrix4x4 -import cv2.aruco -import datetime -import numpy -from pydantic import BaseModel, Field -from typing import Final - - -EPSILON: Final[float] = 0.0001 - - -class DetectorFrameRecord: - _detector_label: str - _frame: DetectorFrame - _timestamp_utc: datetime.datetime | None - _corners_by_marker_id: dict[str, list[list[float]]] | None - - def __init__( - self, - detector_label: str, - frame: DetectorFrame - ): - self._detector_label = detector_label - self._frame = frame - self._timestamp_utc = None # calculated when needed - self._corners_by_marker_id = None - - def _init_corners_by_marker_id(self): - self._corners_by_marker_id = dict() - for snapshot in self._frame.detected_marker_snapshots: - self._corners_by_marker_id[snapshot.label] = [ - [corner_image_point.x_px, corner_image_point.y_px] - for corner_image_point in snapshot.corner_image_points] - - def get_detector_label(self) -> str: - return self._detector_label - - def get_frame(self) -> DetectorFrame: - return self._frame - - def get_marker_corners_by_marker_id( - self, - marker_id: str - ) -> list[list[float]] | None: - if self._corners_by_marker_id is None: - self._init_corners_by_marker_id() - if marker_id in list(self._corners_by_marker_id.keys()): - return 
self._corners_by_marker_id[marker_id] - return None - - def get_marker_ids_detected(self) -> list[str]: - if self._corners_by_marker_id is None: - self._init_corners_by_marker_id() - return list(self._corners_by_marker_id.keys()) - - def get_timestamp_utc(self): - if self._timestamp_utc is None: - self._timestamp_utc = self._frame.timestamp_utc() - return self._timestamp_utc - - -class DetectorRecord: - _frame_records_by_marker_id: dict[str, DetectorFrameRecord] = Field(default_factory=dict) - - def __init__(self): - self._frame_records_by_marker_id = dict() - - def add_frame_record( - self, - frame_record: DetectorFrameRecord - ) -> None: - marker_ids: list[str] = frame_record.get_marker_ids_detected() - for marker_id in marker_ids: - if marker_id not in self._frame_records_by_marker_id or \ - frame_record.get_timestamp_utc() > self._frame_records_by_marker_id[marker_id].get_timestamp_utc(): - self._frame_records_by_marker_id[marker_id] = frame_record - - def clear_frame_records(self): - self._frame_records_by_marker_id.clear() - - def clear_frame_records_older_than( - self, - timestamp_utc: datetime.datetime - ) -> bool: - """ - returns True if any changes were made - """ - return_value: bool = False - marker_ids: list[str] = list(self._frame_records_by_marker_id.keys()) - for marker_id in marker_ids: - frame_record: DetectorFrameRecord = self._frame_records_by_marker_id[marker_id] - if frame_record.get_timestamp_utc() < timestamp_utc: - del self._frame_records_by_marker_id[marker_id] - return_value = True - return return_value - - def get_corners( - self - ) -> dict[str, list[list[float]]]: # [marker_id][point_index][x/y/z] - corners_by_marker_id: dict[str, list[list[float]]] = dict() - for marker_id, frame_record in self._frame_records_by_marker_id.items(): - corners_by_marker_id[marker_id] = frame_record.get_marker_corners_by_marker_id(marker_id=marker_id) - return corners_by_marker_id - - def get_corners_for_marker_id( - self, - marker_id: str - ) -> 
list[list[float]] | None: # [point_index][x/y/z] - if marker_id not in self._frame_records_by_marker_id: - return None - return self._frame_records_by_marker_id[marker_id].get_marker_corners_by_marker_id(marker_id=marker_id) - - -class PoseSolverConfiguration(BaseModel): - serial_identifier: str = Field() - - -class PoseSolverParameters(BaseModel): - minimum_detector_count: int = Field(default=2) - MAXIMUM_RAY_COUNT_FOR_INTERSECTION: int = Field(2) - POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: float = Field(0.1) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MINIMUM_SURFACE_NORMAL_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS: float = Field(1.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MAXIMUM_ORDER: int = Field(0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_DISTANCE: float = Field(15.0, description="millimeters") - POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS: float = Field(0.8) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_DISTANCE: float = Field(15.0) - POSE_SINGLE_CAMERA_REPROJECTION_ERROR_FACTOR_BETA_OVER_ALPHA: float = Field(1.0) - POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS: float = Field(0.4) - # TODO: Is this next one detector-specific? 
- POSE_SINGLE_CAMERA_DEPTH_CORRECTION: float = Field(-7.5, description="millimeters, observed tendency to overestimate depth.") - POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS: float = Field(1.0) - INTERSECTION_MAXIMUM_DISTANCE: float = Field(10.0, description="millimeters") - icp_termination_iteration_count: int = Field(50) - icp_termination_translation: float = Field(0.005, description="millimeters") - icp_termination_rotation_radians: float = Field(0.0005) - icp_termination_mean_point_distance: float = Field(0.1, description="millimeters") - icp_termination_rms_point_distance: float = Field(0.1, description="millimeters") - DENOISE_OUTLIER_DISTANCE_MILLIMETERS: float = Field(10.0) - DENOISE_OUTLIER_ANGLE_DEGREES: float = Field(5.0) - DENOISE_STORAGE_SIZE: int = Field(10) - DENOISE_FILTER_SIZE: int = Field(7) - DENOISE_REQUIRED_STARTING_STREAK: int = Field(3) - ARUCO_MARKER_DICTIONARY_ENUM: int = Field(cv2.aruco.DICT_4X4_100) - ARUCO_POSE_ESTIMATOR_METHOD: int = Field(cv2.SOLVEPNP_ITERATIVE) - # SOLVEPNP_ITERATIVE works okay but is susceptible to optical illusions (flipping) - # SOLVEPNP_P3P appears to return nan's on rare occasion - # SOLVEPNP_SQPNP appears to return nan's on rare occasion - # SOLVEPNP_IPPE_SQUARE does not seem to work very well at all, translation is much smaller than expected - - -class Ray: - source_point: list[float] - direction: list[float] - - def __init__( - self, - source_point: list[float], - direction: list[float] - ): - direction_norm = numpy.linalg.norm(direction) - if direction_norm < EPSILON: - raise ValueError("Direction cannot be zero.") - self.source_point = source_point - self.direction = direction diff --git a/src/pose_solver/util/__init__.py b/src/pose_solver/util/__init__.py deleted file mode 100644 index b800e4b..0000000 --- a/src/pose_solver/util/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .average_quaternion import average_quaternion -from .average_vector import average_vector -from .closest_point_on_ray import 
closest_point_on_ray -from .convex_quadrilateral_area import convex_quadrilateral_area -from .iterative_closest_point import \ - IterativeClosestPointParameters, \ - IterativeClosestPointOutput, \ - iterative_closest_point_for_points_and_rays -from .line_intersection import \ - closest_intersection_between_two_lines, \ - closest_intersection_between_n_lines diff --git a/src/pose_solver/util/average_quaternion.py b/src/pose_solver/util/average_quaternion.py deleted file mode 100644 index ae3d7cf..0000000 --- a/src/pose_solver/util/average_quaternion.py +++ /dev/null @@ -1,17 +0,0 @@ -# Solution based on this link: https://stackoverflow.com/a/27410865 -# based on Markley et al. "Averaging quaternions." Journal of Guidance, Control, and Dynamics 30.4 (2007): 1193-1197. -import numpy -from typing import List - - -def average_quaternion( - quaternions: List[List[float]] -) -> List[float]: - quaternion_matrix = numpy.array(quaternions, dtype="float32").transpose() # quaternions into columns - quaternion_matrix /= len(quaternions) - eigenvalues, eigenvectors = numpy.linalg.eig(numpy.matmul(quaternion_matrix, quaternion_matrix.transpose())) - maximum_eigenvalue_index = numpy.argmax(eigenvalues) - quaternion = eigenvectors[:, maximum_eigenvalue_index] - if quaternion[3] < 0: - quaternion *= -1 - return quaternion.tolist() diff --git a/src/pose_solver/util/average_vector.py b/src/pose_solver/util/average_vector.py deleted file mode 100644 index b61be16..0000000 --- a/src/pose_solver/util/average_vector.py +++ /dev/null @@ -1,17 +0,0 @@ -# This is a very simple function for averaging translations -# when it is not desired to use numpy (for whatever reason) -from typing import List - - -def average_vector( - translations: List[List[float]] -) -> List[float]: - sum_translations: List[float] = [0.0, 0.0, 0.0] - for translation in translations: - for i in range(0, 3): - sum_translations[i] += translation[i] - translation_count = len(translations) - return [ - sum_translations[0] 
/ translation_count, - sum_translations[1] / translation_count, - sum_translations[2] / translation_count] diff --git a/src/pose_solver/util/closest_point_on_ray.py b/src/pose_solver/util/closest_point_on_ray.py deleted file mode 100644 index 94da76b..0000000 --- a/src/pose_solver/util/closest_point_on_ray.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import List - - -# Find the closest point on a ray in 3D -def closest_point_on_ray( - ray_source: List[float], - ray_direction: List[float], - query_point: List[float], - forward_only: bool -): - # Let ray_point be the closest point between query_point and the ray. - # (ray_point - query_point) will be perpendicular to ray_direction. - # Let ray_distance be the distance along the ray where the closest point is. - # So we have two equations: - # (1) (ray_point - query_point) * ray_direction = 0 - # (2) ray_point = ray_source + ray_distance * ray_direction - # If we substitute eq (2) into (1) and solve for ray_distance, we get: - ray_distance: float = ( - (query_point[0] * ray_direction[0] + query_point[1] * ray_direction[1] + query_point[2] * ray_direction[2] - - ray_source[0] * ray_direction[0] - ray_source[1] * ray_direction[1] - ray_source[2] * ray_direction[2]) - / - ((ray_direction[0] ** 2) + (ray_direction[1] ** 2) + (ray_direction[2] ** 2))) - - if ray_distance < 0 and forward_only: - return ray_source # point is behind the source, so the closest point is just the source - - ray_point = [0.0] * 3 # temporary values - for i in range(0, 3): - ray_point[i] = ray_source[i] + ray_distance * ray_direction[i] - return ray_point diff --git a/src/pose_solver/util/convex_quadrilateral_area.py b/src/pose_solver/util/convex_quadrilateral_area.py deleted file mode 100644 index 5b1b747..0000000 --- a/src/pose_solver/util/convex_quadrilateral_area.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy -from typing import Final - -EPSILON: Final[float] = 0.0001 - - -# General approach: -# Given points a, b, c, and d shown below, -# 
and calculating points e and f shown below, -# add areas defined by right triangles bea, ceb, dfc, and afd -# b..................c -# . .. ... -# . ... ... . -# . .. .f. . -# . .e. ... . -# . ... .. . -# ... ... . -# a...................d -def convex_quadrilateral_area( - points: list[list[float]] # 2D points in clockwise order -) -> float: - point_a = numpy.array(points[0], dtype="float32") - point_b = numpy.array(points[1], dtype="float32") - point_c = numpy.array(points[2], dtype="float32") - point_d = numpy.array(points[3], dtype="float32") - - vector_ac = point_c - point_a - vector_ac_norm = numpy.linalg.norm(vector_ac) - vector_bd = point_d - point_b - vector_bd_norm = numpy.linalg.norm(vector_bd) - if vector_ac_norm <= EPSILON or vector_bd_norm <= EPSILON: - return 0.0 - width_vector = vector_ac / numpy.linalg.norm(vector_ac) - height_vector = numpy.array([width_vector[1], -width_vector[0]], dtype="float32") # rotated 90 degrees - - sum_of_areas: float = 0.0 - point_pairs: list[tuple[numpy.ndarray, numpy.ndarray]] = [ - (point_a, point_b), - (point_b, point_c), - (point_c, point_d), - (point_d, point_a)] - for point_pair in point_pairs: - line_vector = point_pair[1] - point_pair[0] - width = numpy.dot(line_vector, width_vector) - height = numpy.dot(line_vector, height_vector) - sum_of_areas += numpy.abs(width * height / 2.0) - - return sum_of_areas - - -def test(): - points = [ - [1.0, 3.0], - [2.0, 5.0], - [5.0, 3.0], - [3.0, 2.0]] - area = 6.0 - assert abs(convex_quadrilateral_area(points) - area) <= EPSILON - assert abs(convex_quadrilateral_area([points[3]] + points[0:3]) - area) <= EPSILON - print("Success") - - -if __name__ == "__main__": - test() diff --git a/src/pose_solver/util/iterative_closest_point.py b/src/pose_solver/util/iterative_closest_point.py deleted file mode 100644 index d17626d..0000000 --- a/src/pose_solver/util/iterative_closest_point.py +++ /dev/null @@ -1,235 +0,0 @@ -# Algorithm is based on ICP: Besl and McKay. 
Method for registration of 3-D shapes. 1992. -import datetime # for testing, not needed for the algorithm itself -import numpy -from scipy.spatial.transform import Rotation -from src.pose_solver.structures import Ray -from .closest_point_on_ray import closest_point_on_ray -from src.common.util import register_corresponding_points - - -class IterativeClosestPointParameters: - # ICP will stop after this many iterations - termination_iteration_count: int - - # ICP will stop if distance *and* angle difference from one iteration to the next - # is smaller than these - termination_delta_translation: float - termination_delta_rotation_radians: float - - # ICP will stop if overall point-to-point distance (between source and target) - # mean *or* root-mean-square is less than specified - termination_mean_point_distance: float - termination_rms_point_distance: float # root-mean-square - - def __init__( - self, - termination_iteration_count: int, - termination_delta_translation: float, - termination_delta_rotation_radians: float, - termination_mean_point_distance: float, - termination_rms_point_distance: float - ): - self.termination_iteration_count = termination_iteration_count - self.termination_delta_translation = termination_delta_translation - self.termination_delta_rotation_radians = termination_delta_rotation_radians - self.termination_mean_point_distance = termination_mean_point_distance - self.termination_rms_point_distance = termination_rms_point_distance - - -class IterativeClosestPointOutput: - source_to_target_matrix: numpy.ndarray - iteration_count: int - mean_point_distance: float - rms_point_distance: float - - def __init__( - self, - source_to_target_matrix: numpy.ndarray, - iteration_count: int, - mean_point_distance: float, - rms_point_distance: float - ): - self.source_to_target_matrix = source_to_target_matrix - self.iteration_count = iteration_count - self.mean_point_distance = mean_point_distance - self.rms_point_distance = rms_point_distance - - -def 
_calculate_transformed_points( - original_points: list[list[float]], - transformation: numpy.ndarray -): - transformed_points: list[list[float]] = list() - for point in original_points: - transformed_point = list(numpy.matmul( - transformation, - numpy.array([point[0], point[1], point[2], 1]))) - transformed_points.append([transformed_point[0], transformed_point[1], transformed_point[2]]) - return transformed_points - - -# This is a customized implementation for Iterative Closest Point -# adapted to the problem of registering a set of points to -# a set of points and rays where the correspondence is known. -def iterative_closest_point_for_points_and_rays( - source_known_points: list[list[float]], - target_known_points: list[list[float]], - source_ray_points: list[list[float]], - target_rays: list[Ray], - parameters: IterativeClosestPointParameters = None, - initial_transformation_matrix: numpy.ndarray = None -) -> IterativeClosestPointOutput: - """ - :param source_known_points: points with known corresponding positions in both source and target coordinate frames - :param target_known_points: points with known corresponding positions in both source and target coordinate frames - :param source_ray_points: points with known position in the source coordinate frame, but NOT in target - :param target_rays: rays along which the remaining target points lie (1:1 correspondence with source_ray_points) - :param parameters: - :param initial_transformation_matrix: - """ - - if len(source_known_points) != len(target_known_points): - raise ValueError("source_known_points and target_known_points must be of equal length (1:1 correspondence).") - - if len(source_known_points) != len(target_known_points): - raise ValueError("source_ray_points and target_rays must be of equal length (1:1 correspondence).") - - # Initial transformation - source_to_transformed_matrix: numpy.ndarray - if initial_transformation_matrix is not None: - source_to_transformed_matrix = 
numpy.array(initial_transformation_matrix, dtype="float32") - else: - source_to_transformed_matrix = numpy.identity(4, dtype="float32") - - if parameters is None: - parameters = IterativeClosestPointParameters( - termination_iteration_count=50, - termination_delta_translation=0.1, - termination_delta_rotation_radians=0.001, - termination_mean_point_distance=0.1, - termination_rms_point_distance=0.1) - - transformed_known_points: list[list[float]] = _calculate_transformed_points( - original_points=source_known_points, - transformation=source_to_transformed_matrix) - transformed_ray_points: list[list[float]] = _calculate_transformed_points( - original_points=source_ray_points, - transformation=source_to_transformed_matrix) - - iteration_count: int = 0 - mean_point_distance: float - rms_point_distance: float - while True: - target_ray_points: list[list[float]] = list() - for i, transformed_ray_point in enumerate(transformed_ray_points): - target_ray_points.append(closest_point_on_ray( - ray_source=target_rays[i].source_point, - ray_direction=target_rays[i].direction, - query_point=transformed_ray_point, - forward_only=True)) - - transformed_points = transformed_known_points + transformed_ray_points - target_points = target_known_points + target_ray_points - transformed_to_target_matrix = register_corresponding_points( - point_set_from=transformed_points, - point_set_to=target_points, - collinearity_do_check=False) - - # update transformation & transformed points - source_to_transformed_matrix = numpy.matmul(transformed_to_target_matrix, source_to_transformed_matrix) - transformed_known_points: list[list[float]] = _calculate_transformed_points( - original_points=source_known_points, - transformation=source_to_transformed_matrix) - transformed_ray_points: list[list[float]] = _calculate_transformed_points( - original_points=source_ray_points, - transformation=source_to_transformed_matrix) - - iteration_count += 1 - - transformed_points = transformed_known_points + 
transformed_ray_points - point_offsets = numpy.subtract(target_points, transformed_points).tolist() - sum_point_distances = 0.0 - sum_square_point_distances = 0.0 - for delta_point_offset in point_offsets: - delta_point_distance: float = numpy.linalg.norm(delta_point_offset) - sum_point_distances += delta_point_distance - sum_square_point_distances += numpy.square(delta_point_distance) - mean_point_distance = sum_point_distances / len(point_offsets) - rms_point_distance = numpy.sqrt(sum_square_point_distances / len(point_offsets)) - - # Check if termination criteria are met - # Note that transformed_to_target_matrix describes the change since last iteration, so we often operate on it - delta_translation = numpy.linalg.norm(transformed_to_target_matrix[0:3, 3]) - delta_rotation_radians = \ - numpy.linalg.norm(Rotation.from_matrix(transformed_to_target_matrix[0:3, 0:3]).as_rotvec()) - if delta_translation < parameters.termination_delta_translation and \ - delta_rotation_radians < parameters.termination_delta_rotation_radians: - break - if mean_point_distance < parameters.termination_mean_point_distance: - break - if rms_point_distance < parameters.termination_rms_point_distance: - break - if iteration_count >= parameters.termination_iteration_count: - break - - return IterativeClosestPointOutput( - source_to_target_matrix=source_to_transformed_matrix, - iteration_count=iteration_count, - mean_point_distance=mean_point_distance, - rms_point_distance=rms_point_distance) - - -def test(): - # Transformation is approximately - source_known_points = [ - [2.0, 0.0, 2.0], - [2.0, 2.0, 2.0], - [2.0, 2.0, 0.0], - [2.0, 0.0, 0.0]] - source_ray_points = [ - [0.0, 2.0, 2.0], - [0.0, 0.0, 2.0], - [0.0, 0.0, 0.0], - [0.0, 2.0, 0.0]] - target_known_points = [ - [1.0, 1.0, -2.0], - [-1.0, 1.0, -2.0], - [-1.0, -1.0, -2.0], - [1.0, -1.0, -2.0]] - origin = [0.0, 0.0, 1.0] - sqrt3 = numpy.sqrt(3.0) - target_rays = [ - Ray(origin, [-sqrt3, sqrt3, -sqrt3]), - Ray(origin, [sqrt3, sqrt3, 
-sqrt3]), - Ray(origin, [sqrt3, -sqrt3, -sqrt3]), - Ray(origin, [-sqrt3, -sqrt3, -sqrt3])] - begin_datetime = datetime.datetime.utcnow() - icp_parameters = IterativeClosestPointParameters( - termination_iteration_count=100, - termination_delta_translation=0.001, - termination_delta_rotation_radians=0.001, - termination_mean_point_distance=0.0001, - termination_rms_point_distance=0.0001) - icp_output = iterative_closest_point_for_points_and_rays( - source_known_points=source_known_points, - target_known_points=target_known_points, - source_ray_points=source_ray_points, - target_rays=target_rays, - parameters=icp_parameters) - source_to_target_matrix = icp_output.source_to_target_matrix - end_datetime = datetime.datetime.utcnow() - duration = (end_datetime - begin_datetime) - duration_seconds = duration.seconds + (duration.microseconds / 1000000.0) - message = str() - for source_point in source_known_points: - source_4d = [source_point[0], source_point[1], source_point[2], 1] - target_4d = list(numpy.matmul(source_to_target_matrix, source_4d)) - target_point = [target_4d[0], target_4d[1], target_4d[2]] - message = message + str(target_point) + "\n" - message += "Algorithm took " + "{:.6f}".format(duration_seconds) + " seconds " + \ - "and took " + str(icp_output.iteration_count) + " iterations." 
- print(message) - - -if __name__ == "__main__": - test() diff --git a/src/pose_solver/util/line_intersection.py b/src/pose_solver/util/line_intersection.py deleted file mode 100644 index d4e82e4..0000000 --- a/src/pose_solver/util/line_intersection.py +++ /dev/null @@ -1,111 +0,0 @@ -import numpy -from src.pose_solver.structures import Ray - - -EPSILON: float = 0.0001 - - -class RayIntersection2: - parallel: bool # special case, mark it as such - closest_point_1: numpy.ndarray - closest_point_2: numpy.ndarray - - def __init__( - self, - parallel: bool, - closest_point_1: numpy.ndarray, - closest_point_2: numpy.ndarray - ): - self.parallel = parallel - self.closest_point_1 = closest_point_1 - self.closest_point_2 = closest_point_2 - - def centroid(self) -> numpy.ndarray: - return (self.closest_point_1 + self.closest_point_2) / 2 - - def distance(self) -> float: - return numpy.linalg.norm(self.closest_point_2 - self.closest_point_1) - - -class RayIntersectionN: - centroids: numpy.ndarray - - # How many rays were used. - # Note that centroids might not use all possible intersections (e.g. 
parallel rays) - ray_count: int - - def __init__( - self, - centroids: numpy.ndarray, - ray_count: int - ): - self.centroids = centroids - self.ray_count = ray_count - - def centroid(self) -> numpy.ndarray: - sum_centroids = numpy.asarray([0, 0, 0], dtype="float32") - for centroid in self.centroids: - sum_centroids += centroid - return sum_centroids / self.centroids.shape[0] - - def intersection_count(self) -> int: - return int((self.ray_count * (self.ray_count - 1))/2) - - -def closest_intersection_between_two_lines( - ray_1: Ray, - ray_2: Ray -) -> RayIntersection2: # Returns data on intersection - ray_1_direction_normalized = ray_1.direction / numpy.linalg.norm(ray_1.direction) - ray_2_direction_normalized = ray_2.direction / numpy.linalg.norm(ray_2.direction) - - # ray 3 will be perpendicular to both rays 1 and 2, - # and will intersect with both rays at the nearest point(s) - - ray_3_direction = numpy.cross(ray_2_direction_normalized, ray_1_direction_normalized) - ray_3_direction_norm = numpy.linalg.norm(ray_3_direction) - if ray_3_direction_norm < EPSILON: - return RayIntersection2( - parallel=True, - closest_point_1=ray_1.source_point, - closest_point_2=ray_2.source_point) - - # system of equations Ax = b - b = numpy.subtract(ray_2.source_point, ray_1.source_point) - a = numpy.asarray( - [ray_1_direction_normalized, -ray_2_direction_normalized, ray_3_direction], dtype="float32").transpose() - x = numpy.linalg.solve(a, b) - - param_ray_1 = float(x[0]) - intersection_point_1 = ray_1.source_point + param_ray_1 * ray_1_direction_normalized - - param_ray_2 = float(x[1]) - intersection_point_2 = ray_2.source_point + param_ray_2 * ray_2_direction_normalized - - return RayIntersection2( - parallel=False, - closest_point_1=intersection_point_1, - closest_point_2=intersection_point_2) - - -def closest_intersection_between_n_lines( - rays: list[Ray], - maximum_distance: float -) -> RayIntersectionN: - ray_count = len(rays) - intersections: list[RayIntersection2] = 
list() - for ray_1_index in range(0, ray_count): - for ray_2_index in range(ray_1_index + 1, ray_count): - intersections.append(closest_intersection_between_two_lines( - ray_1=rays[ray_1_index], - ray_2=rays[ray_2_index])) - centroids: list[numpy.ndarray] = list() - for intersection in intersections: - if intersection.parallel: - continue - if intersection.distance() > maximum_distance: - continue - centroids.append(intersection.centroid()) - return RayIntersectionN( - centroids=numpy.asarray(centroids, dtype="float32"), - ray_count=ray_count) diff --git a/src/slicer_connection.py b/src/slicer_connection.py index 5888790..359665f 100644 --- a/src/slicer_connection.py +++ b/src/slicer_connection.py @@ -1,23 +1,11 @@ -import asyncio import sys import hjson -import numpy as np import pyigtl import time as t import logging -from src.common.api.mct_request_series import MCTRequestSeries -from src.common.structures.component_role_label import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER -from src.common.structures.target import TargetBase from src.controller.mct_controller import MCTController -from src.controller.structures.mct_component_address import MCTComponentAddress -from ipaddress import IPv4Address -from src.pose_solver.api import PoseSolverAddTargetMarkerRequest -from src.pose_solver.api import TargetMarker -from src.pose_solver.api import PoseSolverSetReferenceRequest - -from src.controller.structures.connection import Connection # Input filepath is specified by command line arguments if len(sys.argv) < 2: diff --git a/src/util/generate_target_definition_from_charuco.py b/src/util/generate_target_definition_from_charuco.py index 7f3c354..369d101 100644 --- a/src/util/generate_target_definition_from_charuco.py +++ b/src/util/generate_target_definition_from_charuco.py @@ -1,22 +1,21 @@ -from src.common.structures import \ - CharucoBoardSpecification, \ - TargetBoard -from src.common.structures.target import _Marker +from 
src.implementations.common_aruco_opencv import ArucoOpenCVCommon +from src.common import Annotation, Landmark, Target -board: CharucoBoardSpecification = CharucoBoardSpecification() +board: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() points: list[list[float]] = board.get_marker_corner_points() -markers: list[_Marker] = list() +landmarks: list[Landmark] = list() POINTS_PER_MARKER: int = 4 marker_count: int = round(int(len(points) / POINTS_PER_MARKER)) for marker_index in range(marker_count): point_start_index: int = marker_index * POINTS_PER_MARKER marker_points = points[point_start_index: point_start_index + POINTS_PER_MARKER] - markers.append(_Marker( - marker_id=f"{marker_index}", - points=marker_points)) -target: TargetBoard = TargetBoard( - target_id="reference", - markers=markers) + for corner_index, corner_point in enumerate(marker_points): + landmarks.append(Landmark( + feature_label=f"{marker_index}{Annotation.RELATION_CHARACTER}{corner_index}", + x=corner_point[0], y=corner_point[1], z=corner_point[2])) +target: Target = Target( + label="CharucoBoard", + landmarks=landmarks) with open("temp.json", 'w') as outfile: - outfile.write(target.json(exclude_unset=True, indent=2)) + outfile.write(target.model_dump_json(exclude_unset=True, indent=2)) diff --git a/src/util/measure_detector_to_reference.py b/src/util/measure_detector_to_reference.py index de29a79..c616964 100644 --- a/src/util/measure_detector_to_reference.py +++ b/src/util/measure_detector_to_reference.py @@ -1,20 +1,17 @@ import asyncio import sys import hjson -from ipaddress import IPv4Address import json import logging import numpy as np import os from time import sleep -from timeit import main from scipy.spatial.transform import Rotation as R from src.board_builder.board_builder import BoardBuilder -from src.common.structures.component_role_label import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER -from src.controller.mct_controller import MCTController 
-from src.controller.structures.mct_component_address import MCTComponentAddress -from src.pose_solver.util import average_quaternion, average_vector +from src.controller import Connection, MCTController +from src.common import MathUtils +from src.detector import Detector # input_filepath = "/home/adminpi5/Documents/MCSTrack/data/measure_detector_to_reference_config.json" if len(sys.argv) < 2: @@ -41,12 +38,11 @@ async def main(): ITERATIONS = 20 for detector in detectors: - controller.add_connection(MCTComponentAddress( - label=detector['label'], - role=COMPONENT_ROLE_LABEL_DETECTOR, - ip_address=detector['ip_address'], - port=detector['port'] - )) + controller.add_connection(Connection.ComponentAddress( + label=detector['label'], + role=Detector.get_role_label(), + ip_address=detector['ip_address'], + port=detector['port'])) all_measured_transforms_by_detector[detector['label']] = [] controller.start_up() @@ -69,7 +65,7 @@ async def main(): controller.update() frame = controller.get_live_detector_frame(detector_label) - detectors_and_their_frames[detector_label] = frame.detected_marker_snapshots + detectors_and_their_frames[detector_label] = frame.annotations_identified board_builder.locate_reference_board(detectors_and_their_frames) @@ -92,8 +88,8 @@ async def main(): quaternions.append(quaternion) translations.append(translation) - avg_quaternion = average_quaternion(quaternions) - avg_translation = average_vector(translations) + avg_quaternion = MathUtils.average_quaternion(quaternions) + avg_translation = MathUtils.average_vector(translations) avg_rotation_matrix = R.from_quat(avg_quaternion).as_matrix() diff --git a/test/images/simulated/ideal/C01_FA0.png b/test/images/simulated/ideal/C01_FA0.png new file mode 100644 index 0000000..e0015e0 --- /dev/null +++ b/test/images/simulated/ideal/C01_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bf9f47804b1a43a39bcf184c2a9ce46b7fe71bab34a8d458ac93132c01d256e +size 1232577 diff --git 
a/test/images/simulated/ideal/C01_FB1.png b/test/images/simulated/ideal/C01_FB1.png new file mode 100644 index 0000000..50edd49 --- /dev/null +++ b/test/images/simulated/ideal/C01_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:558de379919c9c7ff33b417f201e667fa73217be10903b5c616abce0514aa878 +size 1284285 diff --git a/test/images/simulated/ideal/C01_FB2.png b/test/images/simulated/ideal/C01_FB2.png new file mode 100644 index 0000000..a23c5a2 --- /dev/null +++ b/test/images/simulated/ideal/C01_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7224f0f49e8cd507d04b420e0b0599960c847eff6dea2cafd041d0e4a816396c +size 1277557 diff --git a/test/images/simulated/ideal/C01_FB3.png b/test/images/simulated/ideal/C01_FB3.png new file mode 100644 index 0000000..3acdb73 --- /dev/null +++ b/test/images/simulated/ideal/C01_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a6655fbd38573abdfe49fbb17b7d3ff5228f7fb38c23405a0c0564f140f6dc +size 1282160 diff --git a/test/images/simulated/ideal/C01_FB4.png b/test/images/simulated/ideal/C01_FB4.png new file mode 100644 index 0000000..06094f8 --- /dev/null +++ b/test/images/simulated/ideal/C01_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9dfc36745eeb7d89ed49a2a2bfe1b27729620eebde33260bb2027c616493d20 +size 1279933 diff --git a/test/images/simulated/ideal/C01_FC1.png b/test/images/simulated/ideal/C01_FC1.png new file mode 100644 index 0000000..137be2d --- /dev/null +++ b/test/images/simulated/ideal/C01_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e9ef2bd6364c71bfd298f97a1b2ef258c6d5128f3a7e387461c94ba5b08ddb +size 1257919 diff --git a/test/images/simulated/ideal/C01_FC2.png b/test/images/simulated/ideal/C01_FC2.png new file mode 100644 index 0000000..5c354b6 --- /dev/null +++ b/test/images/simulated/ideal/C01_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:6767c58ab6f60207a16c83c3fdba25062444bdad689068683878ce6b409b97e3 +size 1229439 diff --git a/test/images/simulated/ideal/C01_FC3.png b/test/images/simulated/ideal/C01_FC3.png new file mode 100644 index 0000000..e9fd0f0 --- /dev/null +++ b/test/images/simulated/ideal/C01_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3df485628ef87d73b6c0e4d26a2b12a9e7a5f70bcdb47decc4fea4c0571c84d0 +size 1257269 diff --git a/test/images/simulated/ideal/C01_FC4.png b/test/images/simulated/ideal/C01_FC4.png new file mode 100644 index 0000000..7c9ecd2 --- /dev/null +++ b/test/images/simulated/ideal/C01_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc7ceaca31583454d21df589a6493f13abd5f22dd74c037ddf79eacdbcb5d9db +size 1234268 diff --git a/test/images/simulated/ideal/C01_FD1.png b/test/images/simulated/ideal/C01_FD1.png new file mode 100644 index 0000000..0e90ec8 --- /dev/null +++ b/test/images/simulated/ideal/C01_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08e7e25d39f17bbf0900e559fd06564d2b45555b670011198f14380c05406a31 +size 1300928 diff --git a/test/images/simulated/ideal/C01_FD2.png b/test/images/simulated/ideal/C01_FD2.png new file mode 100644 index 0000000..b8d03d1 --- /dev/null +++ b/test/images/simulated/ideal/C01_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e026840a4d346a8619dc1e6ccc44dccbad916a8782e81e1db113edfb297a3ad5 +size 1204230 diff --git a/test/images/simulated/ideal/C01_FD3.png b/test/images/simulated/ideal/C01_FD3.png new file mode 100644 index 0000000..2e2adde --- /dev/null +++ b/test/images/simulated/ideal/C01_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a5bb94ca24c881e7c6d079487d2cd1eb358dd91acc5ab25a4936c896b745182 +size 1304665 diff --git a/test/images/simulated/ideal/C01_FD4.png b/test/images/simulated/ideal/C01_FD4.png new file mode 100644 index 0000000..0d8eea7 --- /dev/null +++ 
b/test/images/simulated/ideal/C01_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85404d91dfa7290ecfba48e61d445ebab6fd16f9f861fe41fedacf48a84c3a07 +size 1205644 diff --git a/test/images/simulated/ideal/C01_FE1.png b/test/images/simulated/ideal/C01_FE1.png new file mode 100644 index 0000000..692a6b0 --- /dev/null +++ b/test/images/simulated/ideal/C01_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68f686718bfbc4468efdf641ee72ea96112604c7d52fb85c4411d64c99f3f3c3 +size 1273829 diff --git a/test/images/simulated/ideal/C01_FE2.png b/test/images/simulated/ideal/C01_FE2.png new file mode 100644 index 0000000..91d7b51 --- /dev/null +++ b/test/images/simulated/ideal/C01_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c982ec501acb240012df7641cfb2c6cff56d8b83ca07ec92d350393ed31a384 +size 1249297 diff --git a/test/images/simulated/ideal/C01_FE3.png b/test/images/simulated/ideal/C01_FE3.png new file mode 100644 index 0000000..96d86ed --- /dev/null +++ b/test/images/simulated/ideal/C01_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b554f4ed5b1e86e967c907a45535b784cbe892eb2f939ec11cf8cc8afcfd455 +size 1203610 diff --git a/test/images/simulated/ideal/C02_FA0.png b/test/images/simulated/ideal/C02_FA0.png new file mode 100644 index 0000000..e79ace3 --- /dev/null +++ b/test/images/simulated/ideal/C02_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7e3cd025bfac00bf2ab9d5e400c457481833d5629fc78576716a02f0f633ac5 +size 1288497 diff --git a/test/images/simulated/ideal/C02_FB1.png b/test/images/simulated/ideal/C02_FB1.png new file mode 100644 index 0000000..7735c5f --- /dev/null +++ b/test/images/simulated/ideal/C02_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75abf5a3c79e6a64d02034ac47ae376338f1be0b872e5239eae74f8dac202669 +size 1279945 diff --git a/test/images/simulated/ideal/C02_FB2.png 
b/test/images/simulated/ideal/C02_FB2.png new file mode 100644 index 0000000..c87047a --- /dev/null +++ b/test/images/simulated/ideal/C02_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a27d9f28322eca9183f0160dd0fa7ad062b2742a0d6b0f6f40bd24d0c13af95f +size 1292887 diff --git a/test/images/simulated/ideal/C02_FB3.png b/test/images/simulated/ideal/C02_FB3.png new file mode 100644 index 0000000..3b92667 --- /dev/null +++ b/test/images/simulated/ideal/C02_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1fdf94f6794c071fcd8fa0a6f58663333f374167b7d9c2af99a001361040fbd +size 1263241 diff --git a/test/images/simulated/ideal/C02_FB4.png b/test/images/simulated/ideal/C02_FB4.png new file mode 100644 index 0000000..d122a6f --- /dev/null +++ b/test/images/simulated/ideal/C02_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08cc29ca7aa5aaa47f284df4b1e61ce60860f74080b58d66874b299b4ebbbc90 +size 1278370 diff --git a/test/images/simulated/ideal/C02_FC1.png b/test/images/simulated/ideal/C02_FC1.png new file mode 100644 index 0000000..94777fd --- /dev/null +++ b/test/images/simulated/ideal/C02_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c39a7a1051bae31f22f52b88b09f72db3fa67e35d77d4c469225f4063da70b06 +size 1240421 diff --git a/test/images/simulated/ideal/C02_FC2.png b/test/images/simulated/ideal/C02_FC2.png new file mode 100644 index 0000000..b2823b2 --- /dev/null +++ b/test/images/simulated/ideal/C02_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5074264a809c6ec13e240a45f1d04f402b5a32a3462c8d5e92a34d07f181ba09 +size 1226174 diff --git a/test/images/simulated/ideal/C02_FC3.png b/test/images/simulated/ideal/C02_FC3.png new file mode 100644 index 0000000..9576c30 --- /dev/null +++ b/test/images/simulated/ideal/C02_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e47bdf7f4892bbda429d66395e5c445f15d955268f2697f5844a3bc73b4e6821 +size 1219472 diff --git a/test/images/simulated/ideal/C02_FC4.png b/test/images/simulated/ideal/C02_FC4.png new file mode 100644 index 0000000..7a393f8 --- /dev/null +++ b/test/images/simulated/ideal/C02_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2edfb3fb0a60792694ee171abd7ebb52aea79d99e8ceaae4cf714f52928441b +size 1218974 diff --git a/test/images/simulated/ideal/C02_FD1.png b/test/images/simulated/ideal/C02_FD1.png new file mode 100644 index 0000000..735dbf1 --- /dev/null +++ b/test/images/simulated/ideal/C02_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5af5bd3ee12bcdfdb7cbd5501e71d5fcb83364bac7014716580db341cc7f469 +size 1190362 diff --git a/test/images/simulated/ideal/C02_FD2.png b/test/images/simulated/ideal/C02_FD2.png new file mode 100644 index 0000000..07bfb33 --- /dev/null +++ b/test/images/simulated/ideal/C02_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f8ca0bb256270a6ad186fe55b04906a4ca21669bcbab83cf12480a2a2036bc0 +size 1274873 diff --git a/test/images/simulated/ideal/C02_FD3.png b/test/images/simulated/ideal/C02_FD3.png new file mode 100644 index 0000000..7b73797 --- /dev/null +++ b/test/images/simulated/ideal/C02_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26d7164c7467a1727ae9438feb17bbfaeb972e4dd18f36d0207993658cd1b9fa +size 1291826 diff --git a/test/images/simulated/ideal/C02_FD4.png b/test/images/simulated/ideal/C02_FD4.png new file mode 100644 index 0000000..f35ae5f --- /dev/null +++ b/test/images/simulated/ideal/C02_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bc88eaacd404c4ce155445963eb860a9f6bc1e005096d92a6850f1ee4d2d594 +size 1185253 diff --git a/test/images/simulated/ideal/C02_FE1.png b/test/images/simulated/ideal/C02_FE1.png new file mode 100644 index 0000000..ea4778c --- /dev/null +++ 
b/test/images/simulated/ideal/C02_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5af866e4afed0c3d3efd691a27c02be41eaf4aae68165ef09cb380699efac75 +size 1310222 diff --git a/test/images/simulated/ideal/C02_FE2.png b/test/images/simulated/ideal/C02_FE2.png new file mode 100644 index 0000000..582baf6 --- /dev/null +++ b/test/images/simulated/ideal/C02_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22585f081780eda6d6a796806c790f3d7e0289e41826d048908e5353b1c81370 +size 1306641 diff --git a/test/images/simulated/ideal/C02_FE3.png b/test/images/simulated/ideal/C02_FE3.png new file mode 100644 index 0000000..684bb84 --- /dev/null +++ b/test/images/simulated/ideal/C02_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0ca123c94385f477c4cc776f5512a9f897411ffda6016b2039809c13d2e8f72 +size 1206274 diff --git a/test/images/simulated/ideal/C03_FA0.png b/test/images/simulated/ideal/C03_FA0.png new file mode 100644 index 0000000..2c7e078 --- /dev/null +++ b/test/images/simulated/ideal/C03_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d89b40adab42eebf4d5a110c50a7a4b6be2fca602a51dc9e6d412185a0f4d83d +size 1233551 diff --git a/test/images/simulated/ideal/C03_FB1.png b/test/images/simulated/ideal/C03_FB1.png new file mode 100644 index 0000000..4033c2b --- /dev/null +++ b/test/images/simulated/ideal/C03_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c297262251e7e0e92dcb1bb43dc53d68b086eeffb89da07235a98b57380323b9 +size 1298337 diff --git a/test/images/simulated/ideal/C03_FB2.png b/test/images/simulated/ideal/C03_FB2.png new file mode 100644 index 0000000..92ad8d2 --- /dev/null +++ b/test/images/simulated/ideal/C03_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80b64da1dd31e8d4e806468e3e58b5cbfc669cc92d5a8f9cd5576cc0d99f06dd +size 1294380 diff --git a/test/images/simulated/ideal/C03_FB3.png 
b/test/images/simulated/ideal/C03_FB3.png new file mode 100644 index 0000000..66dae32 --- /dev/null +++ b/test/images/simulated/ideal/C03_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eb977fff5b69488220f35be0d48e09ff510fd34506807100e854645392f773b +size 1265355 diff --git a/test/images/simulated/ideal/C03_FB4.png b/test/images/simulated/ideal/C03_FB4.png new file mode 100644 index 0000000..127499c --- /dev/null +++ b/test/images/simulated/ideal/C03_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd3208aea61c558ee9bebe0e452d82111d2d511a4555d87202eac14babae4cb1 +size 1267374 diff --git a/test/images/simulated/ideal/C03_FC1.png b/test/images/simulated/ideal/C03_FC1.png new file mode 100644 index 0000000..2c9e8e8 --- /dev/null +++ b/test/images/simulated/ideal/C03_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f56d055980cf7268e9f6357be5d25713e6d07a96d5e5e59c92ad85993b8b87b +size 1262504 diff --git a/test/images/simulated/ideal/C03_FC2.png b/test/images/simulated/ideal/C03_FC2.png new file mode 100644 index 0000000..fb83d1d --- /dev/null +++ b/test/images/simulated/ideal/C03_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f21ff35b63cdf85dac5687ec9d3bf32817b5f68b1bf03548b0985ce54378fc +size 1261828 diff --git a/test/images/simulated/ideal/C03_FC3.png b/test/images/simulated/ideal/C03_FC3.png new file mode 100644 index 0000000..89c7120 --- /dev/null +++ b/test/images/simulated/ideal/C03_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2caece8489a8185202296915dcaeaa43b954c559dfc0aa3080d81a2e606de8ff +size 1236088 diff --git a/test/images/simulated/ideal/C03_FC4.png b/test/images/simulated/ideal/C03_FC4.png new file mode 100644 index 0000000..22500e1 --- /dev/null +++ b/test/images/simulated/ideal/C03_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d73c5dffba357e14a0328da288c32b508710cc7a8962d94b1124a6d0f473ddc6 +size 1236033 diff --git a/test/images/simulated/ideal/C03_FD1.png b/test/images/simulated/ideal/C03_FD1.png new file mode 100644 index 0000000..7f54b66 --- /dev/null +++ b/test/images/simulated/ideal/C03_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac4149848f89718be401759aa42f446b05e1ba23b44f0fde0026cb3b6a2be42b +size 1305665 diff --git a/test/images/simulated/ideal/C03_FD2.png b/test/images/simulated/ideal/C03_FD2.png new file mode 100644 index 0000000..dc1e76b --- /dev/null +++ b/test/images/simulated/ideal/C03_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31a47e294c109673a99b208f2b13180d7ce554ede3ad667dcd27452fb0e39b82 +size 1302756 diff --git a/test/images/simulated/ideal/C03_FD3.png b/test/images/simulated/ideal/C03_FD3.png new file mode 100644 index 0000000..347bfa2 --- /dev/null +++ b/test/images/simulated/ideal/C03_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10f7d0573e8abce21d2adbe95881ed0762187382867e4568dd385828b8c6aee8 +size 1206465 diff --git a/test/images/simulated/ideal/C03_FD4.png b/test/images/simulated/ideal/C03_FD4.png new file mode 100644 index 0000000..a4c41dd --- /dev/null +++ b/test/images/simulated/ideal/C03_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f47ddf5f0f74e8a28a21194bc16c9869ac8d54abab47d09b3b5fb3e96021fd81 +size 1204077 diff --git a/test/images/simulated/ideal/C03_FE1.png b/test/images/simulated/ideal/C03_FE1.png new file mode 100644 index 0000000..3043adc --- /dev/null +++ b/test/images/simulated/ideal/C03_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51517b3a58198e53e3f266e41a3da5c61d6f66726dcbaf419b4af0d04a970f3b +size 1296973 diff --git a/test/images/simulated/ideal/C03_FE2.png b/test/images/simulated/ideal/C03_FE2.png new file mode 100644 index 0000000..bc30587 --- /dev/null +++ 
b/test/images/simulated/ideal/C03_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b304bd98f7b64ba63797d2c2f40142f8f61d10c8ef772381e970cdbdb40e067c +size 1170676 diff --git a/test/images/simulated/ideal/C03_FE3.png b/test/images/simulated/ideal/C03_FE3.png new file mode 100644 index 0000000..4d79a60 --- /dev/null +++ b/test/images/simulated/ideal/C03_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fce3ca475607e56e828766122fd57d922c3f16aa6a15cb2bb83009103ec6ba57 +size 1285763 diff --git a/test/images/simulated/ideal/C04_FA0.png b/test/images/simulated/ideal/C04_FA0.png new file mode 100644 index 0000000..c2d6f3c --- /dev/null +++ b/test/images/simulated/ideal/C04_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31a64eb3cfd1ac8a6cc5bbd143f57ad62aaae98073aec268e0592d7293dcc52d +size 1288973 diff --git a/test/images/simulated/ideal/C04_FB1.png b/test/images/simulated/ideal/C04_FB1.png new file mode 100644 index 0000000..f0a63f9 --- /dev/null +++ b/test/images/simulated/ideal/C04_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37872871dd20d0561f62f015d325a2f5253d35f30f7f0b2a99c28aa362fb9d6 +size 1306500 diff --git a/test/images/simulated/ideal/C04_FB2.png b/test/images/simulated/ideal/C04_FB2.png new file mode 100644 index 0000000..10f0a38 --- /dev/null +++ b/test/images/simulated/ideal/C04_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61380b9bf1adf7da9a1d54dc94582cbd189ff02be66c3e9753f67288272b004a +size 1274520 diff --git a/test/images/simulated/ideal/C04_FB3.png b/test/images/simulated/ideal/C04_FB3.png new file mode 100644 index 0000000..7320da7 --- /dev/null +++ b/test/images/simulated/ideal/C04_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14380c23953252866e7a3ce44410148a679d4e87c6b49262ccb2a91ed45838b3 +size 1283879 diff --git a/test/images/simulated/ideal/C04_FB4.png 
b/test/images/simulated/ideal/C04_FB4.png new file mode 100644 index 0000000..e1bbcb9 --- /dev/null +++ b/test/images/simulated/ideal/C04_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9626a0a347b3bdebfeadd35833e2e6d5d8b42c862b773e1fa7df79d994cbdc36 +size 1279182 diff --git a/test/images/simulated/ideal/C04_FC1.png b/test/images/simulated/ideal/C04_FC1.png new file mode 100644 index 0000000..46a764c --- /dev/null +++ b/test/images/simulated/ideal/C04_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20bd37d9df549c697ddefe8c70a185ef28918a9b48596b46be218cc38d696693 +size 1226535 diff --git a/test/images/simulated/ideal/C04_FC2.png b/test/images/simulated/ideal/C04_FC2.png new file mode 100644 index 0000000..8eaf531 --- /dev/null +++ b/test/images/simulated/ideal/C04_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:340439956f379de6ef50ce7e9295c12c4a4c6aea636a84a54b1d097cac295b8b +size 1238751 diff --git a/test/images/simulated/ideal/C04_FC3.png b/test/images/simulated/ideal/C04_FC3.png new file mode 100644 index 0000000..c565c1b --- /dev/null +++ b/test/images/simulated/ideal/C04_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b6fec4fd7160f0cabd33254396030fdc9da094ec23e66f66ad8e84cc9acc212 +size 1219473 diff --git a/test/images/simulated/ideal/C04_FC4.png b/test/images/simulated/ideal/C04_FC4.png new file mode 100644 index 0000000..4c6a211 --- /dev/null +++ b/test/images/simulated/ideal/C04_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6663e15497f69af6cb928f250bdd93f97a1a7eeace7fc73c935ae53731129cd6 +size 1219918 diff --git a/test/images/simulated/ideal/C04_FD1.png b/test/images/simulated/ideal/C04_FD1.png new file mode 100644 index 0000000..123c7ca --- /dev/null +++ b/test/images/simulated/ideal/C04_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:94a593dfa33400f1c2533ffb6a17f6f53ebc089f28a2f837045349e2241df05f +size 1291913 diff --git a/test/images/simulated/ideal/C04_FD2.png b/test/images/simulated/ideal/C04_FD2.png new file mode 100644 index 0000000..29e5df6 --- /dev/null +++ b/test/images/simulated/ideal/C04_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f94a8c8eebaf17ff514b5f217404cecfeb362c580b388649c361428c7bc4346 +size 1189579 diff --git a/test/images/simulated/ideal/C04_FD3.png b/test/images/simulated/ideal/C04_FD3.png new file mode 100644 index 0000000..fd1368f --- /dev/null +++ b/test/images/simulated/ideal/C04_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db1f75897901d63dfa18b24f016fe4de953865684b57a1893568d5c547d0ab3b +size 1184625 diff --git a/test/images/simulated/ideal/C04_FD4.png b/test/images/simulated/ideal/C04_FD4.png new file mode 100644 index 0000000..635bb03 --- /dev/null +++ b/test/images/simulated/ideal/C04_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bc0fe4fbed76c7183a311d02aae304bf8b1188f114b9e0d6980bf5f60dfee39 +size 1274947 diff --git a/test/images/simulated/ideal/C04_FE1.png b/test/images/simulated/ideal/C04_FE1.png new file mode 100644 index 0000000..7322772 --- /dev/null +++ b/test/images/simulated/ideal/C04_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5964bd3102387e811a414f225f79e4de9253395b5e71c6118e73b207f055a3f +size 1207453 diff --git a/test/images/simulated/ideal/C04_FE2.png b/test/images/simulated/ideal/C04_FE2.png new file mode 100644 index 0000000..3a23fed --- /dev/null +++ b/test/images/simulated/ideal/C04_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b30f4153fcf3e609acb7ad25104deb229207ec911a340e836392a1d2266ed700 +size 1316420 diff --git a/test/images/simulated/ideal/C04_FE3.png b/test/images/simulated/ideal/C04_FE3.png new file mode 100644 index 0000000..cf4e873 --- /dev/null +++ 
b/test/images/simulated/ideal/C04_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27a50c95291a43bfd4fd1ead9c5c770cff1e6a12e866efef88eccd80c3914026 +size 1314098 diff --git a/test/images/simulated/ideal/C05_FA0.png b/test/images/simulated/ideal/C05_FA0.png new file mode 100644 index 0000000..e34f43d --- /dev/null +++ b/test/images/simulated/ideal/C05_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ace6bd924eddb716cfe45a5f6b0283f369e93178b2011264891a603fa174a4ff +size 1233126 diff --git a/test/images/simulated/ideal/C05_FB1.png b/test/images/simulated/ideal/C05_FB1.png new file mode 100644 index 0000000..9c7393e --- /dev/null +++ b/test/images/simulated/ideal/C05_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04b2e333f07bf6d1b70ed29cfc6ea9a3474f0f521823113f67b2630cc6d433a5 +size 1279021 diff --git a/test/images/simulated/ideal/C05_FB2.png b/test/images/simulated/ideal/C05_FB2.png new file mode 100644 index 0000000..c5bebfd --- /dev/null +++ b/test/images/simulated/ideal/C05_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc2e08483eb9a8669c3716d4784da2339449e99d3f350454f62e01cc2a7eed72 +size 1281794 diff --git a/test/images/simulated/ideal/C05_FB3.png b/test/images/simulated/ideal/C05_FB3.png new file mode 100644 index 0000000..924d09d --- /dev/null +++ b/test/images/simulated/ideal/C05_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e2b42600afc08d84c455f68826426dc44ce9ba2e1beaabeb215fba015d6b5d1 +size 1275044 diff --git a/test/images/simulated/ideal/C05_FB4.png b/test/images/simulated/ideal/C05_FB4.png new file mode 100644 index 0000000..693e6ca --- /dev/null +++ b/test/images/simulated/ideal/C05_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:139693e02cca3c936f6d0c4d40eb076bb1f307e3745491f8fa950e9d26074e48 +size 1284170 diff --git a/test/images/simulated/ideal/C05_FC1.png 
b/test/images/simulated/ideal/C05_FC1.png new file mode 100644 index 0000000..274b93d --- /dev/null +++ b/test/images/simulated/ideal/C05_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4997ee771be0b9253258780257a6795ef4fcbd8b3e40470c365331276ade2ac +size 1234950 diff --git a/test/images/simulated/ideal/C05_FC2.png b/test/images/simulated/ideal/C05_FC2.png new file mode 100644 index 0000000..f2fe746 --- /dev/null +++ b/test/images/simulated/ideal/C05_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:423ca9785dd24eb81eb51a15d1790570bbc25f58798fd8e32b47d3f0db390549 +size 1256728 diff --git a/test/images/simulated/ideal/C05_FC3.png b/test/images/simulated/ideal/C05_FC3.png new file mode 100644 index 0000000..b1a3869 --- /dev/null +++ b/test/images/simulated/ideal/C05_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdf218bfb18ef802c87c7277f9c12b8f94115caee7e5474c322c0b50c07e464a +size 1230402 diff --git a/test/images/simulated/ideal/C05_FC4.png b/test/images/simulated/ideal/C05_FC4.png new file mode 100644 index 0000000..23b453c --- /dev/null +++ b/test/images/simulated/ideal/C05_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8428378e23bd243747f74ab55c3e78bb762562be724924f57e3761c8c48cf9a4 +size 1257375 diff --git a/test/images/simulated/ideal/C05_FD1.png b/test/images/simulated/ideal/C05_FD1.png new file mode 100644 index 0000000..d1ba0f3 --- /dev/null +++ b/test/images/simulated/ideal/C05_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5ae976964212b95cbe005a9805f9d0ca1d189dedfdfc23e2297cd5b7e9405bb +size 1206791 diff --git a/test/images/simulated/ideal/C05_FD2.png b/test/images/simulated/ideal/C05_FD2.png new file mode 100644 index 0000000..02b5ecd --- /dev/null +++ b/test/images/simulated/ideal/C05_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7f6b4f8981d537b3ebf2ed106960de4d504b99bc1fc8954185d3347af3040151 +size 1306077 diff --git a/test/images/simulated/ideal/C05_FD3.png b/test/images/simulated/ideal/C05_FD3.png new file mode 100644 index 0000000..2dbf067 --- /dev/null +++ b/test/images/simulated/ideal/C05_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f56196b324e531155d846d33354818f9eada8c9ea17d98ca6b78fdcde2505b5b +size 1205689 diff --git a/test/images/simulated/ideal/C05_FD4.png b/test/images/simulated/ideal/C05_FD4.png new file mode 100644 index 0000000..2feddbe --- /dev/null +++ b/test/images/simulated/ideal/C05_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a9403a96a7ff9549aa671b590c6c3c8442ced86fb08bb91d2ae905ff7cd9184 +size 1302322 diff --git a/test/images/simulated/ideal/C05_FE1.png b/test/images/simulated/ideal/C05_FE1.png new file mode 100644 index 0000000..e7fc09a --- /dev/null +++ b/test/images/simulated/ideal/C05_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa86cadc666f7742e7cb5246beed8ba652a618fa198f554c60e128d03ac1eb14 +size 1203688 diff --git a/test/images/simulated/ideal/C05_FE2.png b/test/images/simulated/ideal/C05_FE2.png new file mode 100644 index 0000000..14c6e18 --- /dev/null +++ b/test/images/simulated/ideal/C05_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f80a2972b128ba8295600fe0ff62cb5c524b6d47509ec586fb7d836b3056988 +size 1273183 diff --git a/test/images/simulated/ideal/C05_FE3.png b/test/images/simulated/ideal/C05_FE3.png new file mode 100644 index 0000000..deccaf4 --- /dev/null +++ b/test/images/simulated/ideal/C05_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ea393fc378bd4d02c9c34410460c5a4b6d093d3aaedb2a6faabe36e6a8ccbc +size 1270786 diff --git a/test/images/simulated/ideal/C06_FA0.png b/test/images/simulated/ideal/C06_FA0.png new file mode 100644 index 0000000..b7c9188 --- /dev/null +++ 
b/test/images/simulated/ideal/C06_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e58fc7a300deab704a5dd8f7eb3fc4101ebd25bddc5e633f37345b35a33f54de +size 1208671 diff --git a/test/images/simulated/ideal/C06_FB1.png b/test/images/simulated/ideal/C06_FB1.png new file mode 100644 index 0000000..6f2ea06 --- /dev/null +++ b/test/images/simulated/ideal/C06_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0884d60581cb7cf20b4ac43e3e12112a5245e9708901c3b3f4aae0c021a5212 +size 1256113 diff --git a/test/images/simulated/ideal/C06_FB2.png b/test/images/simulated/ideal/C06_FB2.png new file mode 100644 index 0000000..6ad27b2 --- /dev/null +++ b/test/images/simulated/ideal/C06_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d4297cdd15687617713d63da9a782973e1b88ebfd753e1ee5daa32043206528 +size 1289015 diff --git a/test/images/simulated/ideal/C06_FB3.png b/test/images/simulated/ideal/C06_FB3.png new file mode 100644 index 0000000..4299319 --- /dev/null +++ b/test/images/simulated/ideal/C06_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bba781bd37866cd3232ead4d6e32a0e4e546cac4171e79c66bb265ecd798baa8 +size 1259215 diff --git a/test/images/simulated/ideal/C06_FB4.png b/test/images/simulated/ideal/C06_FB4.png new file mode 100644 index 0000000..aca4997 --- /dev/null +++ b/test/images/simulated/ideal/C06_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:164e0f7197c513515f222f04f87e3d30133c0867eb4b7518c52906b2c03cbf97 +size 1299267 diff --git a/test/images/simulated/ideal/C06_FC1.png b/test/images/simulated/ideal/C06_FC1.png new file mode 100644 index 0000000..837fc18 --- /dev/null +++ b/test/images/simulated/ideal/C06_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e345ec13535a775fc01aba6a6b0d9e19b4cab67684d9a900362da4e5a78604ce +size 1212068 diff --git a/test/images/simulated/ideal/C06_FC2.png 
b/test/images/simulated/ideal/C06_FC2.png new file mode 100644 index 0000000..754bd82 --- /dev/null +++ b/test/images/simulated/ideal/C06_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4155addc9fa8934707f9697a1cc74bdb21a7688a713bcecad6b4976e1ad53588 +size 1199468 diff --git a/test/images/simulated/ideal/C06_FC3.png b/test/images/simulated/ideal/C06_FC3.png new file mode 100644 index 0000000..250ea8b --- /dev/null +++ b/test/images/simulated/ideal/C06_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72194fc00184d87b6d5abc4f5cbbe1ccb1c8d17d1e01476fc41cf9f5ebed0707 +size 1211910 diff --git a/test/images/simulated/ideal/C06_FC4.png b/test/images/simulated/ideal/C06_FC4.png new file mode 100644 index 0000000..21bba1c --- /dev/null +++ b/test/images/simulated/ideal/C06_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:515ee3f6b56a29ac3de1cbceac2adf1eebf9676ef6f35e66ae81e677dd8037cc +size 1204429 diff --git a/test/images/simulated/ideal/C06_FD1.png b/test/images/simulated/ideal/C06_FD1.png new file mode 100644 index 0000000..9e50e0b --- /dev/null +++ b/test/images/simulated/ideal/C06_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa74f68a12524f544858d0634b3892b59df2275ec71bbe0342479bf1ddb99aa5 +size 1283711 diff --git a/test/images/simulated/ideal/C06_FD2.png b/test/images/simulated/ideal/C06_FD2.png new file mode 100644 index 0000000..e873654 --- /dev/null +++ b/test/images/simulated/ideal/C06_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c03d76b2142bd8e9e37a763ead3090d49e5c88d0fb14abe8a518ff11cbc8b32f +size 1180193 diff --git a/test/images/simulated/ideal/C06_FD3.png b/test/images/simulated/ideal/C06_FD3.png new file mode 100644 index 0000000..db22f61 --- /dev/null +++ b/test/images/simulated/ideal/C06_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bdcd3504c36f1cab7945209219fc41513b1f76b213931af1964c0a018f0971d4 +size 1283701 diff --git a/test/images/simulated/ideal/C06_FD4.png b/test/images/simulated/ideal/C06_FD4.png new file mode 100644 index 0000000..e96ae39 --- /dev/null +++ b/test/images/simulated/ideal/C06_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56b9ca69503cfa481deddf2e8670eddb757be47b3d5a678c6663d4d598f685f5 +size 1193130 diff --git a/test/images/simulated/ideal/C06_FE1.png b/test/images/simulated/ideal/C06_FE1.png new file mode 100644 index 0000000..a204a9d --- /dev/null +++ b/test/images/simulated/ideal/C06_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dde0332c757efbec7d07fd047e323148d489080c12913f64336031b73ce4c378 +size 1268617 diff --git a/test/images/simulated/ideal/C06_FE2.png b/test/images/simulated/ideal/C06_FE2.png new file mode 100644 index 0000000..fe9f747 --- /dev/null +++ b/test/images/simulated/ideal/C06_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1ead6d1808c8b9d5fab45bd9540c552a8347146df39ba1eca9f81451ac1f211 +size 1235108 diff --git a/test/images/simulated/ideal/C06_FE3.png b/test/images/simulated/ideal/C06_FE3.png new file mode 100644 index 0000000..8ca6443 --- /dev/null +++ b/test/images/simulated/ideal/C06_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6c5832b289e03c37c154c1b8db2510ccc67863e690099e60a26d30b94f62c0f +size 1182082 diff --git a/test/images/simulated/ideal/C07_FA0.png b/test/images/simulated/ideal/C07_FA0.png new file mode 100644 index 0000000..e6fc719 --- /dev/null +++ b/test/images/simulated/ideal/C07_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42f3f895971d23b5f6dc33540662c2da0efea8951131ee26844b91c742f15bc2 +size 1284751 diff --git a/test/images/simulated/ideal/C07_FB1.png b/test/images/simulated/ideal/C07_FB1.png new file mode 100644 index 0000000..126c02e --- /dev/null +++ 
b/test/images/simulated/ideal/C07_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:132e947c7b0add4ff087419ebb81ab39b21c8306475ebc91859c5b8a7b5c31a4 +size 1252320 diff --git a/test/images/simulated/ideal/C07_FB2.png b/test/images/simulated/ideal/C07_FB2.png new file mode 100644 index 0000000..8529647 --- /dev/null +++ b/test/images/simulated/ideal/C07_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b10e2084add858f308dbe3001e2b1845efb8758b376603eeecac966c6fdfc1d7 +size 1288518 diff --git a/test/images/simulated/ideal/C07_FB3.png b/test/images/simulated/ideal/C07_FB3.png new file mode 100644 index 0000000..dc77a77 --- /dev/null +++ b/test/images/simulated/ideal/C07_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cba23a291e20ab5bde9cedd4560f25fedb0d98b6afa9d18b635b81dd5aa233a +size 1232176 diff --git a/test/images/simulated/ideal/C07_FB4.png b/test/images/simulated/ideal/C07_FB4.png new file mode 100644 index 0000000..f2f35f9 --- /dev/null +++ b/test/images/simulated/ideal/C07_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56c87b58a7cd45bf0a3f00d58129a086a0b63eae1c84454457ddc0117bc61f8a +size 1279125 diff --git a/test/images/simulated/ideal/C07_FC1.png b/test/images/simulated/ideal/C07_FC1.png new file mode 100644 index 0000000..eaa5ea9 --- /dev/null +++ b/test/images/simulated/ideal/C07_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9f8c81e8998f871eab031fdbac2f417f9cc72f57ba5d7a7c9fd22944a8927da +size 1194565 diff --git a/test/images/simulated/ideal/C07_FC2.png b/test/images/simulated/ideal/C07_FC2.png new file mode 100644 index 0000000..d3c5b24 --- /dev/null +++ b/test/images/simulated/ideal/C07_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51987d6d42597be1523b61d77fb3fed3fead1268fa38fe2c4bd35da4567d9bbb +size 1203638 diff --git a/test/images/simulated/ideal/C07_FC3.png 
b/test/images/simulated/ideal/C07_FC3.png new file mode 100644 index 0000000..f6d3db8 --- /dev/null +++ b/test/images/simulated/ideal/C07_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de0f0b898753ffea8640e6d1243493a1647c776ffc8668f6e63c0f979cfcabf4 +size 1196855 diff --git a/test/images/simulated/ideal/C07_FC4.png b/test/images/simulated/ideal/C07_FC4.png new file mode 100644 index 0000000..628db69 --- /dev/null +++ b/test/images/simulated/ideal/C07_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd7d7dca4b30106257d5d34b82de634a086f72b5df3e7c9b1daf2056c26182a0 +size 1194578 diff --git a/test/images/simulated/ideal/C07_FD1.png b/test/images/simulated/ideal/C07_FD1.png new file mode 100644 index 0000000..55dbe18 --- /dev/null +++ b/test/images/simulated/ideal/C07_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c87bd067d5158728bba9c213c7b5ea85a5afa44dc4a5d422028166aa65878bb7 +size 1179444 diff --git a/test/images/simulated/ideal/C07_FD2.png b/test/images/simulated/ideal/C07_FD2.png new file mode 100644 index 0000000..65516b5 --- /dev/null +++ b/test/images/simulated/ideal/C07_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cc9d9e79c70179ddfa240f17d476ef3b081f25521508f5ca0c13f69408063ef +size 1267416 diff --git a/test/images/simulated/ideal/C07_FD3.png b/test/images/simulated/ideal/C07_FD3.png new file mode 100644 index 0000000..d517c81 --- /dev/null +++ b/test/images/simulated/ideal/C07_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84f3e408e0648048783c748efffa74c49a201139e3bca64a855f030e90c4f13c +size 1274955 diff --git a/test/images/simulated/ideal/C07_FD4.png b/test/images/simulated/ideal/C07_FD4.png new file mode 100644 index 0000000..a8d7c8a --- /dev/null +++ b/test/images/simulated/ideal/C07_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3f627215ec71537d06f891b9b03db6c57bb50a940a0441e97ef3d0009dd82d4f +size 1170489 diff --git a/test/images/simulated/ideal/C07_FE1.png b/test/images/simulated/ideal/C07_FE1.png new file mode 100644 index 0000000..de54c0e --- /dev/null +++ b/test/images/simulated/ideal/C07_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbccde6b1245e197359f930b5b905c0b7b6b2df2950e0a73e32049e0ab622cc4 +size 1305414 diff --git a/test/images/simulated/ideal/C07_FE2.png b/test/images/simulated/ideal/C07_FE2.png new file mode 100644 index 0000000..ea1c149 --- /dev/null +++ b/test/images/simulated/ideal/C07_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:260e3065666ee2b5d3835dcf086547c906dd9174e671f987dcb3cac3780b5cc6 +size 1299195 diff --git a/test/images/simulated/ideal/C07_FE3.png b/test/images/simulated/ideal/C07_FE3.png new file mode 100644 index 0000000..359b8cf --- /dev/null +++ b/test/images/simulated/ideal/C07_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e1e3dd84c9e8925cf8a2a28730cf35214ed7746b262d778e60d7634881f3363 +size 1185807 diff --git a/test/images/simulated/ideal/C08_FA0.png b/test/images/simulated/ideal/C08_FA0.png new file mode 100644 index 0000000..04d5d0d --- /dev/null +++ b/test/images/simulated/ideal/C08_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44350a09cea2e7ac972d01a295185ad33b1b89c82f06eacfda61ceafcc21c019 +size 1207331 diff --git a/test/images/simulated/ideal/C08_FB1.png b/test/images/simulated/ideal/C08_FB1.png new file mode 100644 index 0000000..fe5be1e --- /dev/null +++ b/test/images/simulated/ideal/C08_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c56a844a48ceacbf41e07d1c4f1e2ff6e786090ff12b2c66d1ee38559676bf9 +size 1278628 diff --git a/test/images/simulated/ideal/C08_FB2.png b/test/images/simulated/ideal/C08_FB2.png new file mode 100644 index 0000000..910724b --- /dev/null +++ 
b/test/images/simulated/ideal/C08_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93cba4618e1727ff75d44d400d0a1fc3717558078cd11b07f9cc6156bdbb3cf1 +size 1272782 diff --git a/test/images/simulated/ideal/C08_FB3.png b/test/images/simulated/ideal/C08_FB3.png new file mode 100644 index 0000000..37ed699 --- /dev/null +++ b/test/images/simulated/ideal/C08_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:302d271d44177e890c27c1715fa21e113b0640d6369e0cd071bd4ee799ef4c5e +size 1283928 diff --git a/test/images/simulated/ideal/C08_FB4.png b/test/images/simulated/ideal/C08_FB4.png new file mode 100644 index 0000000..bd04d07 --- /dev/null +++ b/test/images/simulated/ideal/C08_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae4b9eae30a5cab4c3cb07595eda9625f64eabdd78ebc73b3a81e4cd133a7cf3 +size 1282322 diff --git a/test/images/simulated/ideal/C08_FC1.png b/test/images/simulated/ideal/C08_FC1.png new file mode 100644 index 0000000..29180cc --- /dev/null +++ b/test/images/simulated/ideal/C08_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cba2ef2e114f8fc9e097243382318bcc247ac9b72244cedfb990cbcc512fb8af +size 1212558 diff --git a/test/images/simulated/ideal/C08_FC2.png b/test/images/simulated/ideal/C08_FC2.png new file mode 100644 index 0000000..c37aa3c --- /dev/null +++ b/test/images/simulated/ideal/C08_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:090e874012b2a1edc5857ea4b78f0774c8e50afee75cc6d771738e5db807634d +size 1212804 diff --git a/test/images/simulated/ideal/C08_FC3.png b/test/images/simulated/ideal/C08_FC3.png new file mode 100644 index 0000000..757ea35 --- /dev/null +++ b/test/images/simulated/ideal/C08_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73522150153ea3f518514e513a4dd5ed47c51c4e7818eb480de1e51a897c0efa +size 1192940 diff --git a/test/images/simulated/ideal/C08_FC4.png 
b/test/images/simulated/ideal/C08_FC4.png new file mode 100644 index 0000000..dcb84cf --- /dev/null +++ b/test/images/simulated/ideal/C08_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7baae4728a077a7dcd0a22c1e5bd3e290fceffbb141217654cbc350de2e2328f +size 1192237 diff --git a/test/images/simulated/ideal/C08_FD1.png b/test/images/simulated/ideal/C08_FD1.png new file mode 100644 index 0000000..2e1422d --- /dev/null +++ b/test/images/simulated/ideal/C08_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85cd548f64c8eed0349774f83d0c9c4c9d3d2e5d79606411595fcaad73b8f1c0 +size 1282507 diff --git a/test/images/simulated/ideal/C08_FD2.png b/test/images/simulated/ideal/C08_FD2.png new file mode 100644 index 0000000..de3c822 --- /dev/null +++ b/test/images/simulated/ideal/C08_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10dadff94993f9d607a535dfabbe6ee1ffa47f40b6f3cb2d3512e76f7a847b2d +size 1283288 diff --git a/test/images/simulated/ideal/C08_FD3.png b/test/images/simulated/ideal/C08_FD3.png new file mode 100644 index 0000000..02f7c38 --- /dev/null +++ b/test/images/simulated/ideal/C08_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec09e6e06acfc0474cd8311da3f956e730489ebcff0fa639548ffd72f9811f79 +size 1193213 diff --git a/test/images/simulated/ideal/C08_FD4.png b/test/images/simulated/ideal/C08_FD4.png new file mode 100644 index 0000000..4446be7 --- /dev/null +++ b/test/images/simulated/ideal/C08_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2c856395a0923e0d29d3500931137bbc3e2caae0a48cd899f68938b4376e238 +size 1179135 diff --git a/test/images/simulated/ideal/C08_FE1.png b/test/images/simulated/ideal/C08_FE1.png new file mode 100644 index 0000000..35231c0 --- /dev/null +++ b/test/images/simulated/ideal/C08_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6ca3d9a6773df5d18431610146bcc6c3c2017ae4253da3a6bd508a178b0807de +size 1291841 diff --git a/test/images/simulated/ideal/C08_FE2.png b/test/images/simulated/ideal/C08_FE2.png new file mode 100644 index 0000000..18cb47b --- /dev/null +++ b/test/images/simulated/ideal/C08_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ddccec14ecfb07d481a58372adef25932ff7d7e2f4cc75ef89d1167d39ab532 +size 1177217 diff --git a/test/images/simulated/ideal/C08_FE3.png b/test/images/simulated/ideal/C08_FE3.png new file mode 100644 index 0000000..71abb5a --- /dev/null +++ b/test/images/simulated/ideal/C08_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5daf43772b05b759370133db665934d2cff98aea6775c5e588c4a15e14032887 +size 1274898 diff --git a/test/images/simulated/ideal/C09_FA0.png b/test/images/simulated/ideal/C09_FA0.png new file mode 100644 index 0000000..9687259 --- /dev/null +++ b/test/images/simulated/ideal/C09_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:072a17790997ac45ab4c8c2f38a7f2fc2bd53e50fbfce6f111b0cfd3aadf3120 +size 1281445 diff --git a/test/images/simulated/ideal/C09_FB1.png b/test/images/simulated/ideal/C09_FB1.png new file mode 100644 index 0000000..b1f697a --- /dev/null +++ b/test/images/simulated/ideal/C09_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f86a187ea2bff9bbf93904280d139a18f9c66562c0a5456a4a64bed70c15785 +size 1298470 diff --git a/test/images/simulated/ideal/C09_FB2.png b/test/images/simulated/ideal/C09_FB2.png new file mode 100644 index 0000000..18baf8f --- /dev/null +++ b/test/images/simulated/ideal/C09_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94ff27dc9d8eb125f3916d5930a2bc12b17a4f83014732095cdb92163e44394d +size 1247026 diff --git a/test/images/simulated/ideal/C09_FB3.png b/test/images/simulated/ideal/C09_FB3.png new file mode 100644 index 0000000..8167855 --- /dev/null +++ 
b/test/images/simulated/ideal/C09_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5e13c48ae34f5d398ad08d6961498e74f3a47cbaa3a9f45681ce8b939c27771 +size 1275944 diff --git a/test/images/simulated/ideal/C09_FB4.png b/test/images/simulated/ideal/C09_FB4.png new file mode 100644 index 0000000..657c0d8 --- /dev/null +++ b/test/images/simulated/ideal/C09_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86bc91442e4f660402a364e78da3d139532a74fcae54d61c3cf4c10c522f6e9a +size 1244595 diff --git a/test/images/simulated/ideal/C09_FC1.png b/test/images/simulated/ideal/C09_FC1.png new file mode 100644 index 0000000..81f81d7 --- /dev/null +++ b/test/images/simulated/ideal/C09_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28470512c52a82dd746c94ee8da1c7b8319e55a8d257880694b1d00e51f291ca +size 1198124 diff --git a/test/images/simulated/ideal/C09_FC2.png b/test/images/simulated/ideal/C09_FC2.png new file mode 100644 index 0000000..c850cb9 --- /dev/null +++ b/test/images/simulated/ideal/C09_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1147802f007378d31d20d3ec358699976af3d4fe165eb5c7ee1f24d53bd32dd8 +size 1192527 diff --git a/test/images/simulated/ideal/C09_FC3.png b/test/images/simulated/ideal/C09_FC3.png new file mode 100644 index 0000000..895b972 --- /dev/null +++ b/test/images/simulated/ideal/C09_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10c56359b630c8eb265c246c70316bdbe358c43ed5b2ac2977a570c7084c748d +size 1192540 diff --git a/test/images/simulated/ideal/C09_FC4.png b/test/images/simulated/ideal/C09_FC4.png new file mode 100644 index 0000000..210f082 --- /dev/null +++ b/test/images/simulated/ideal/C09_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:232281e6ed6400e7c1ed9a22b2d12f4a5b50562f448f1dff6d4d1f89c17c92d7 +size 1195078 diff --git a/test/images/simulated/ideal/C09_FD1.png 
b/test/images/simulated/ideal/C09_FD1.png new file mode 100644 index 0000000..918b9d5 --- /dev/null +++ b/test/images/simulated/ideal/C09_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:151a89efe82bb32bdf743f8457415de6c75fc3c135b1f227e2d6fa5d4b602bb3 +size 1273026 diff --git a/test/images/simulated/ideal/C09_FD2.png b/test/images/simulated/ideal/C09_FD2.png new file mode 100644 index 0000000..a2e77fc --- /dev/null +++ b/test/images/simulated/ideal/C09_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70ac100516e78ec21daf28b38ad555b20d7f9a27804cb31ace0a1ec7ed089df9 +size 1176896 diff --git a/test/images/simulated/ideal/C09_FD3.png b/test/images/simulated/ideal/C09_FD3.png new file mode 100644 index 0000000..77d3711 --- /dev/null +++ b/test/images/simulated/ideal/C09_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2d402401c505925efa8af0682eddf5777a7324bbf07ca947ed3fc7c7786f9b2 +size 1168861 diff --git a/test/images/simulated/ideal/C09_FD4.png b/test/images/simulated/ideal/C09_FD4.png new file mode 100644 index 0000000..cb3e8fd --- /dev/null +++ b/test/images/simulated/ideal/C09_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2decd304560a0b0fd03c9eb193737993dc33d5665a740cdea5c70ebb208cb701 +size 1264227 diff --git a/test/images/simulated/ideal/C09_FE1.png b/test/images/simulated/ideal/C09_FE1.png new file mode 100644 index 0000000..b5fc27d --- /dev/null +++ b/test/images/simulated/ideal/C09_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cf62ca13e6a828f2b532ca5604d97c69f4407b413146831088b830bd3b7bc70 +size 1188121 diff --git a/test/images/simulated/ideal/C09_FE2.png b/test/images/simulated/ideal/C09_FE2.png new file mode 100644 index 0000000..e860322 --- /dev/null +++ b/test/images/simulated/ideal/C09_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a8daa8d3cbd03337099263138aaaae0d58ecac98507f01003017509609c552cf +size 1292796 diff --git a/test/images/simulated/ideal/C09_FE3.png b/test/images/simulated/ideal/C09_FE3.png new file mode 100644 index 0000000..f21bd65 --- /dev/null +++ b/test/images/simulated/ideal/C09_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4ca94f00b54c45722669a77ff64e1acf7da3e7feb96d2d37ced42cf923526cc +size 1307856 diff --git a/test/images/simulated/ideal/C10_FA0.png b/test/images/simulated/ideal/C10_FA0.png new file mode 100644 index 0000000..d831a0d --- /dev/null +++ b/test/images/simulated/ideal/C10_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9ea2b453c981c3513872dca52650e7c1e90102ed00f58b585034b4e99b0669d +size 1210237 diff --git a/test/images/simulated/ideal/C10_FB1.png b/test/images/simulated/ideal/C10_FB1.png new file mode 100644 index 0000000..7fb8cee --- /dev/null +++ b/test/images/simulated/ideal/C10_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e36266dc2b3c1a358e846a3174bd847039ab63c86eb4de38d40102e1bde6f70 +size 1300095 diff --git a/test/images/simulated/ideal/C10_FB2.png b/test/images/simulated/ideal/C10_FB2.png new file mode 100644 index 0000000..acaeedb --- /dev/null +++ b/test/images/simulated/ideal/C10_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9203c027aaba16532690f39a6ee519cd8af68f40a877d300f7f82de64f8861e +size 1260990 diff --git a/test/images/simulated/ideal/C10_FB3.png b/test/images/simulated/ideal/C10_FB3.png new file mode 100644 index 0000000..7acb646 --- /dev/null +++ b/test/images/simulated/ideal/C10_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfa87b15eb137ec661c9e5f7c3ede9869e5aca44276abf596e4d8e8314edc950 +size 1289728 diff --git a/test/images/simulated/ideal/C10_FB4.png b/test/images/simulated/ideal/C10_FB4.png new file mode 100644 index 0000000..b40f68c --- /dev/null +++ 
b/test/images/simulated/ideal/C10_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e2f392082ce341d1e0b2d4c5dc9bdfb51f0d5680fc876026ce56b847a9924d4 +size 1258300 diff --git a/test/images/simulated/ideal/C10_FC1.png b/test/images/simulated/ideal/C10_FC1.png new file mode 100644 index 0000000..3d3dcb6 --- /dev/null +++ b/test/images/simulated/ideal/C10_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a2dda3c318f5e4f34c6bd4cb90f2afee654e4a6f86f0ddf0d796e668c3dba96 +size 1205711 diff --git a/test/images/simulated/ideal/C10_FC2.png b/test/images/simulated/ideal/C10_FC2.png new file mode 100644 index 0000000..c840eee --- /dev/null +++ b/test/images/simulated/ideal/C10_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e7637df71efdbd8ad5607c64461486d9e59c348f03fd1fd5c22db0db760cd5c +size 1211787 diff --git a/test/images/simulated/ideal/C10_FC3.png b/test/images/simulated/ideal/C10_FC3.png new file mode 100644 index 0000000..f118206 --- /dev/null +++ b/test/images/simulated/ideal/C10_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48d43d3d550bc5b0384c9fda972b8e51c3aaece8e2c55aaf83d8b1cee6e66b65 +size 1201509 diff --git a/test/images/simulated/ideal/C10_FC4.png b/test/images/simulated/ideal/C10_FC4.png new file mode 100644 index 0000000..3cdb70c --- /dev/null +++ b/test/images/simulated/ideal/C10_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b3e54546a0ae6559159beb81b8efbd8ba6643329e3370a18e69773f67446ed7 +size 1212204 diff --git a/test/images/simulated/ideal/C10_FD1.png b/test/images/simulated/ideal/C10_FD1.png new file mode 100644 index 0000000..4a4be24 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:728e1a397ed745e29fe5e410b160dfbaad1a5f6b8b60b7cd14dbfe93285f11bc +size 1195040 diff --git a/test/images/simulated/ideal/C10_FD2.png 
b/test/images/simulated/ideal/C10_FD2.png new file mode 100644 index 0000000..67a5490 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca5a64c6d622ca0b2f5e1a43d72f89ec9421c22c6ef5f2ed530e97bd491acbb7 +size 1285930 diff --git a/test/images/simulated/ideal/C10_FD3.png b/test/images/simulated/ideal/C10_FD3.png new file mode 100644 index 0000000..ee68c13 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7c3ec5f10b5cc552303e7c5d5dc71d093839349452ff0c2d96bb4cd59180f6d +size 1181306 diff --git a/test/images/simulated/ideal/C10_FD4.png b/test/images/simulated/ideal/C10_FD4.png new file mode 100644 index 0000000..4853af7 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34312f42da5f393364c3db9072f89dd5a1fcf64812b440ffa1eb01d98405244b +size 1287139 diff --git a/test/images/simulated/ideal/C10_FE1.png b/test/images/simulated/ideal/C10_FE1.png new file mode 100644 index 0000000..924e238 --- /dev/null +++ b/test/images/simulated/ideal/C10_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5364b626b26cbed4a00fd3e6c8e1e70eb701d42fd16be2464081b55a5056496a +size 1183342 diff --git a/test/images/simulated/ideal/C10_FE2.png b/test/images/simulated/ideal/C10_FE2.png new file mode 100644 index 0000000..ada89b9 --- /dev/null +++ b/test/images/simulated/ideal/C10_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10768669908715812dd798af55f42c44218f78cdd7174b1c84808ce33370c237 +size 1266312 diff --git a/test/images/simulated/ideal/C10_FE3.png b/test/images/simulated/ideal/C10_FE3.png new file mode 100644 index 0000000..535c2df --- /dev/null +++ b/test/images/simulated/ideal/C10_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0c6d984edc7ec98c4c8dd6be704a7963be15d69a9a0dddb5613efe08856709ff +size 1274775 diff --git a/test/images/simulated/ideal/about.txt b/test/images/simulated/ideal/about.txt new file mode 100644 index 0000000..6e2d46a --- /dev/null +++ b/test/images/simulated/ideal/about.txt @@ -0,0 +1,13 @@ +This folder contains a set of simulated images in order to help test code. +Each file is named as follows: +C_F.png + +Each camera is 1 meter away from a central pivot point at the origin. +Each camera has its y axis aligned with the world's up direction (+z-axis in Blender). +Cameras 01 through 05 are all oriented 30 degrees relative to the table surface, each increasingly rotated 45 degrees around the world origin. +Cameras 06 through 10 are all oriented 45 degrees relative to the table surface, each increasingly rotated 45 degrees around the world origin +All cameras use the same viewing angle and image resolutions. + +Frames from the A and B sets will have the ChArUco board visible to all cameras. +Frames from the C and D sets will have the ChArUco board visible to most cameras but not all. +Frames from the E set are designed to be visible only to specific limited sets of cameras. 
diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py new file mode 100644 index 0000000..7fa38b7 --- /dev/null +++ b/test/test_extrinsic_calibration.py @@ -0,0 +1,190 @@ +from src.common import \ + ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ + ImageResolution, \ + ImageUtils, \ + IntrinsicParameters, \ + KeyValueSimpleAny, \ + KeyValueSimpleString +from src.implementations.common_aruco_opencv import \ + ArucoOpenCVCommon +from src.implementations.extrinsic_charuco_opencv import \ + CharucoOpenCVExtrinsicCalibrator +import cv2 +import datetime +import numpy +import os +import re +from scipy.spatial.transform import Rotation +from tempfile import TemporaryDirectory +from typing import Final +import unittest + + +IMAGE_CONTENT_PATTERN: Final[re.Pattern] = re.compile(r"C([a-zA-Z0-9]+)_F([a-zA-Z0-9]+).png") +IMAGE_CONTENT_MATCH_INDEX_CAMERA: Final[int] = 1 +IMAGE_CONTENT_MATCH_INDEX_FRAME: Final[int] = 2 +IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) +MARKER_DETECTION_PARAMETERS: list[KeyValueSimpleAny] = [ + KeyValueSimpleString( + key=ArucoOpenCVCommon.KEY_CORNER_REFINEMENT_METHOD, + value=ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_SUBPIX)] +THRESHOLD_TRANSLATION_IN_PLANE_MM: Final[float] = 10 +THRESHOLD_TRANSLATION_OUT_OF_PLANE_MM: Final[float] = 25 +THRESHOLD_ROTATION_DEG: Final[float] = 1 + + +class TestPoseSolver(unittest.TestCase): + def test(self): + # Organize ourselves with respect to the input data + image_location: str = os.path.join("images", "simulated", "ideal") + image_contents: list[str] = os.listdir(image_location) + image_filepaths_by_camera_frame: dict[str, dict[str, str]] = dict() # Access as: x[CameraID][FrameID] + image_filepaths_by_frame_camera: dict[str, dict[str, str]] = dict() # Access as: x[FrameID][CameraID] + timestamps_iso8601_by_frame: dict[str, str] = dict() # Access as: x[FrameID] + reference_time: datetime.datetime = 
datetime.datetime.now(tz=datetime.timezone.utc) + image_count: int = 0 + for image_content in image_contents: + if image_content == "about.txt": + continue + + image_filepath: str = os.path.join(image_location, image_content) + if not os.path.isfile(image_filepath): + continue + + match: re.Match = IMAGE_CONTENT_PATTERN.match(image_content) + if match is None: + self.fail( + f"The input filename {image_content} did not match the expected pattern. " + "Were files moved or added?") + + camera_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_CAMERA) + frame_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_FRAME) + if camera_id not in image_filepaths_by_camera_frame: + image_filepaths_by_camera_frame[camera_id] = dict() + image_filepaths_by_camera_frame[camera_id][frame_id] = image_filepath + if frame_id not in image_filepaths_by_frame_camera: + image_filepaths_by_frame_camera[frame_id] = dict() + timestamps_iso8601_by_frame[frame_id] = ( + reference_time + - datetime.timedelta(hours=1) + + datetime.timedelta(seconds=image_count)).isoformat() + image_filepaths_by_frame_camera[frame_id][camera_id] = image_filepath + image_count += 1 + + # All cameras have the same imaging parameters. 
+ # These were calculated by hand assuming lenses without any distortions + intrinsic_parameters: IntrinsicParameters = IntrinsicParameters( + focal_length_x_px=3582.76878, + focal_length_y_px=3640.38430, + optical_center_x_px=960.0, + optical_center_y_px=540.0, + radial_distortion_coefficients=[0, 0, 0], + tangential_distortion_coefficients=[0, 0]) + + intrinsics_by_camera: dict[str, IntrinsicParameters] = dict() # Access as x[camera_id] + for camera_id in image_filepaths_by_camera_frame.keys(): + intrinsics_by_camera[camera_id] = intrinsic_parameters + + extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator + extrinsic_calibration: ExtrinsicCalibration + with TemporaryDirectory() as temppath: + extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator = CharucoOpenCVExtrinsicCalibrator( + configuration=CharucoOpenCVExtrinsicCalibrator.Configuration(data_path=temppath)) + for frame_id, image_filepaths_by_camera_id in image_filepaths_by_frame_camera.items(): + for camera_id, image_filepath in image_filepaths_by_camera_id.items(): + image: numpy.ndarray = cv2.imread(image_filepath) + image_base64: str = ImageUtils.image_to_base64(image) + extrinsic_calibrator.add_image( + image_base64=image_base64, + detector_label=camera_id, + timestamp_utc_iso8601=timestamps_iso8601_by_frame[frame_id]) + for camera_id, intrinsic_parameters in intrinsics_by_camera.items(): + extrinsic_calibrator.intrinsic_parameters_update( + detector_label=camera_id, + intrinsic_parameters=intrinsic_parameters) + _, extrinsic_calibration = extrinsic_calibrator.calculate() + + # label, translation, rotation (as quaternion) + ground_truth_detector_poses: dict[str, tuple[list[float], list[float]]] = { + "01": ([-866.025, 0., 500.], [ 0.353553, -0.353553, -0.612372, 0.612372]), + "02": ([-612.372, -612.372, 500.], [ 0.46194, -0.191342, -0.331414, 0.800103]), + "03": ([0., -866.025, 500.], [ 0.5, 0., 0., 0.866025]), + "04": ([612.372, -612.372, 500.], [ 0.46194, 0.191342, 0.331414, 0.800103]), + "05": 
([866.025, 0., 500.], [ 0.353553, 0.353553, 0.612372, 0.612372]), + "06": ([-707.107, 0., 707.107], [ 0.270598, -0.270598, -0.653281, 0.653282]), + "07": ([-500., -500., 707.107], [ 0.353553, -0.146447, -0.353553, 0.853553]), + "08": ([0., -707.107, 707.107], [ 0.382683, 0., 0., 0.92388]), + "09": ([500., -500., 707.107], [ 0.353553, 0.146447, 0.353553, 0.853553]), + "10": ([707.107, 0., 707.107], [ 0.270598, 0.270598, 0.653282, 0.653281])} + + calibrated_value: ExtrinsicCalibrationDetectorResult + for calibrated_value in extrinsic_calibration.calibrated_values: + expected_translation: list[float] + expected_rotation_quaternion: list[float] + expected_translation, expected_rotation_quaternion = \ + ground_truth_detector_poses[calibrated_value.detector_label] + obtained_translation: list[float] = calibrated_value.detector_to_reference.get_translation() + obtained_rotation_quaternion: list[float] = \ + calibrated_value.detector_to_reference.get_rotation_as_quaternion() + + translation_difference_vector: numpy.ndarray = \ + numpy.asarray(expected_translation) - numpy.asarray(obtained_translation) + # noinspection PyArgumentList + image_plane_normal: numpy.ndarray = numpy.matmul( + Rotation.from_quat(expected_rotation_quaternion).as_matrix(), + numpy.asarray([0, 0, 1])) + translation_difference_in_plane_mm: float = numpy.dot( + translation_difference_vector, + image_plane_normal) + self.assertLess(translation_difference_in_plane_mm, THRESHOLD_TRANSLATION_IN_PLANE_MM) + + # Pythagorean theorem to get the translation component out of plane + translation_difference_out_of_plane_mm: float = numpy.sqrt( + translation_difference_in_plane_mm ** 2 + numpy.linalg.norm(translation_difference_vector) ** 2) + self.assertLess(translation_difference_out_of_plane_mm, THRESHOLD_TRANSLATION_OUT_OF_PLANE_MM) + + # noinspection PyArgumentList + rotation_difference_deg: float = numpy.linalg.norm( + Rotation.from_matrix(numpy.matmul( + 
Rotation.from_quat(expected_rotation_quaternion).as_matrix(), + numpy.linalg.inv(Rotation.from_quat(obtained_rotation_quaternion).as_matrix()) + )).as_rotvec(degrees=True)) + self.assertLess(rotation_difference_deg, THRESHOLD_ROTATION_DEG) + + # print( + # f"{calibrated_value.detector_label}: \n" + # f" Expected translation: {expected_translation}\n" + # f" Obtained translation: {obtained_translation}\n" + # f" Expected rotation: {expected_rotation_quaternion}\n" + # f" Obtained rotation: {obtained_rotation_quaternion}\n" + # f" Translation Diff IP: {translation_difference_in_plane_mm}\n" + # f" Translation Diff OOP: {translation_difference_out_of_plane_mm}\n" + # f" Rotation Diff Deg: {rotation_difference_deg}") + + # These are from the Blender file, copied by hand. + # They are not currently used in the test, but they are here for possible later use. + # Note that Blender represents quaternions in WXYZ, and here it is XYZW. + # The numbers have not been verified, and as such it is possible that there may be some errors. 
+ _ground_truth_frame_poses: dict[str, tuple[list[float], list[float]]] = { + "A0": ([0., 0., 10.], [ 0., 0., 0., 1.]), + "B1": ([125., 100., 30.], [ 0.065263, -0.113039, 0.495722, 0.858616]), + "B2": ([-125., 100., 30.], [ 0.113039, -0.065263, 0.858616, 0.495722]), + "B3": ([125., -100., 30.], [ 0.065263, 0.113039, 0.495722, -0.858616]), + "B4": ([-125., -100., 30.], [ 0.113039, 0.065263, 0.858616, -0.495722]), + "C1": ([250., 250., 30.], [ 0.065263, -0.113039, 0.495722, 0.858616]), + "C2": ([-250., 250., 30.], [ 0.113039, -0.065263, 0.858616, 0.495722]), + "C3": ([250., -250., 30.], [ 0.065263, 0.113039, 0.495722, -0.858616]), + "C4": ([-250., -250., 30.], [ 0.113039, 0.065263, 0.858616, -0.495722]), + "D1": ([-25., -25., 80.], [ 0.176704, -0.4266, 0.339444, 0.819491]), + "D2": ([25., -25., 80.], [ 0.4266, -0.176703, 0.819491, 0.339444]), + "D3": ([-25., 25., 80.], [ -0.176704, -0.4266, -0.339444, 0.819491]), + "D4": ([25., 25., 80.], [ 0.4266, 0.176704, 0.819491, -0.339445]), + "E1": ([-175., 5., 105.], [ 0.06027, -0.457798, 0.115778, 0.879422]), + "E2": ([0., -175., 105.], [ 0.326506, -0.326506, 0.627211, 0.627211]), + "E3": ([175., -5., 105.], [ 0.457798, -0.06027, 0.879422, 0.115778])} + + +if __name__ == "__main__": + a = TestPoseSolver() + a.test() diff --git a/test/test_register_corresponding_points.py b/test/test_math_utils.py similarity index 52% rename from test/test_register_corresponding_points.py rename to test/test_math_utils.py index 7e5e085..3299104 100644 --- a/test/test_register_corresponding_points.py +++ b/test/test_math_utils.py @@ -1,11 +1,18 @@ -from src.common.util import register_corresponding_points +from src.common import \ + IterativeClosestPointParameters, \ + MathUtils, \ + Ray +import datetime import numpy from scipy.spatial.transform import Rotation -import unittest +from typing import Final +from unittest import TestCase -# noinspection DuplicatedCode -class TestRegisterCorrespondingPoints(unittest.TestCase): +EPSILON: 
Final[float] = 0.0001 + + +class TestMathUtils(TestCase): def assertRotationCloseToIdentity( self, @@ -22,13 +29,76 @@ def assertRotationCloseToIdentity( self.assertAlmostEqual(float(matrix[2, 1]), 0.0, delta=tolerance) self.assertAlmostEqual(float(matrix[2, 2]), 1.0, delta=tolerance) - def test_identity_3_points(self): + def test_convex_quadrilateral_area(self): + points = [ + [1.0, 3.0], + [2.0, 5.0], + [5.0, 3.0], + [3.0, 2.0]] + area = 6.0 + self.assertAlmostEqual(abs(MathUtils.convex_quadrilateral_area(points)), area, delta=EPSILON) + self.assertAlmostEqual(abs(MathUtils.convex_quadrilateral_area([points[3]] + points[0:3])), area, delta=EPSILON) + + def test_iterative_closest_point(self) -> None: + # Transformation is approximately + source_known_points = [ + [2.0, 0.0, 2.0], + [2.0, 2.0, 2.0], + [2.0, 2.0, 0.0], + [2.0, 0.0, 0.0]] + source_ray_points = [ + [0.0, 2.0, 2.0], + [0.0, 0.0, 2.0], + [0.0, 0.0, 0.0], + [0.0, 2.0, 0.0]] + target_known_points = [ + [1.0, 1.0, -2.0], + [-1.0, 1.0, -2.0], + [-1.0, -1.0, -2.0], + [1.0, -1.0, -2.0]] + origin = [0.0, 0.0, 1.0] + sqrt3 = numpy.sqrt(3.0) + target_rays = [ + Ray(source_point=origin, direction=[-sqrt3, sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[sqrt3, sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[sqrt3, -sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[-sqrt3, -sqrt3, -sqrt3])] + begin_datetime = datetime.datetime.now(tz=datetime.timezone.utc) + icp_parameters = IterativeClosestPointParameters( + termination_iteration_count=100, + termination_delta_translation=0.001, + termination_delta_rotation_radians=0.001, + termination_mean_point_distance=0.0001, + termination_rms_point_distance=0.0001) + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( + source_known_points=source_known_points, + target_known_points=target_known_points, + source_ray_points=source_ray_points, + target_rays=target_rays, + parameters=icp_parameters) + source_to_target_matrix = 
icp_output.source_to_target_matrix.as_numpy_array() + end_datetime = datetime.datetime.now(tz=datetime.timezone.utc) + duration = (end_datetime - begin_datetime) + duration_seconds = duration.seconds + (duration.microseconds / 1000000.0) + message = str() + for source_point in source_known_points: + source_4d = [source_point[0], source_point[1], source_point[2], 1] + target_4d = list(numpy.matmul(source_to_target_matrix, source_4d)) + target_point = [target_4d[0], target_4d[1], target_4d[2]] + message = message + str(target_point) + "\n" + message += "Algorithm took " + "{:.6f}".format(duration_seconds) + " seconds " + \ + "and took " + str(icp_output.iteration_count) + " iterations." + print(message) + + # TODO: Comparisons, self.assertXXXXX() + + def test_register_corresponding_points_identity_3_points(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] point_set_to = point_set_from - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], 0.0) self.assertAlmostEqual(matrix[1, 3], 0.0) @@ -38,14 +108,14 @@ def test_identity_3_points(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_identity_4_points(self): + def test_register_corresponding_points_identity_4_points(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]] point_set_to = point_set_from - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], 0.0) self.assertAlmostEqual(matrix[1, 3], 0.0) @@ -55,14 +125,14 @@ def test_identity_4_points(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_translation(self): + def 
test_register_corresponding_points_translation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] translation = [10, 0, 10] point_set_to = list(point_set_from + numpy.stack(translation)) - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], translation[0]) self.assertAlmostEqual(matrix[1, 3], translation[1]) @@ -72,14 +142,14 @@ def test_translation(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_rotation(self): + def test_register_corresponding_points_rotation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] ground_truth = Rotation.as_matrix(Rotation.from_euler(seq="x", angles=90, degrees=True)) point_set_to = list(numpy.matmul(ground_truth, numpy.asarray(point_set_from).T).T) - result = register_corresponding_points(point_set_from, point_set_to) + result = MathUtils.register_corresponding_points(point_set_from, point_set_to) for i in range(0, 3): for j in range(0, 3): self.assertAlmostEqual(result[i][j], ground_truth[i][j]) @@ -91,7 +161,7 @@ def test_rotation(self): self.assertEqual(result[3, 1], 0.0) self.assertEqual(result[3, 2], 0.0) - def test_rotation_and_translation(self): + def test_register_corresponding_points_rotation_and_translation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], @@ -100,7 +170,7 @@ def test_rotation_and_translation(self): point_set_to = list(numpy.matmul(ground_truth, numpy.asarray(point_set_from).T).T) translation = [10, -20, 30] point_set_to = list(point_set_to + numpy.stack(translation)) - result = register_corresponding_points(point_set_from, point_set_to) + result = MathUtils.register_corresponding_points(point_set_from, point_set_to) for i in range(0, 3): for j in range(0, 3): self.assertAlmostEqual(result[i][j], ground_truth[i][j]) @@ -112,15 
+182,15 @@ def test_rotation_and_translation(self): self.assertEqual(result[3, 1], 0.0) self.assertEqual(result[3, 2], 0.0) - def test_too_few_points(self): + def test_register_corresponding_points_too_few_points(self): point_set_from = [[0.0, 0.0, 0.0]] point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to) + MathUtils.register_corresponding_points(point_set_from, point_set_to) except ValueError: pass - def test_inequal_point_set_lengths(self): + def test_register_corresponding_points_inequal_point_set_lengths(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], @@ -131,28 +201,29 @@ def test_inequal_point_set_lengths(self): [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] try: - register_corresponding_points(point_set_from, point_set_to) + MathUtils.register_corresponding_points(point_set_from, point_set_to) except ValueError: pass - def test_collinear(self): + def test_register_corresponding_points_collinear(self): point_set_from = [ [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]] point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) + MathUtils.register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) except ValueError: pass - def test_singular(self): + def test_register_corresponding_points_singular(self): point_set_from = [ [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) + MathUtils.register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) except ValueError: pass + diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index 0188f22..81c5c2c 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -1,32 +1,27 @@ -from src.pose_solver.pose_solver import PoseSolver -from src.common.structures import \ - DetectorFrame, \ - ImageResolution, \ +from 
src.common import \ + Annotation, \ IntrinsicParameters, \ - MarkerCornerImagePoint, \ - MarkerSnapshot, \ Matrix4x4, \ - Pose -from src.common.structures import \ - TargetMarker + Pose, \ + PoseSolver, \ + Target +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon import datetime from typing import Final import unittest -IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) +REL_CHAR: Final[str] = Annotation.RELATION_CHARACTER # For brevity MARKER_SIZE_MM: Final[float] = 10.0 REFERENCE_TARGET_ID: Final[str] = "reference" REFERENCE_MARKER_ID: Final[str] = "0" -REFERENCE_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - target_id=REFERENCE_TARGET_ID, - marker_id=REFERENCE_MARKER_ID, +REFERENCE_MARKER_TARGET: Final[Target] = ArucoOpenCVCommon.target_from_marker_parameters( + base_label=REFERENCE_MARKER_ID, marker_size=MARKER_SIZE_MM) TARGET_TARGET_ID: Final[str] = "target" TARGET_MARKER_ID: Final[str] = "1" -TARGET_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - target_id=TARGET_TARGET_ID, - marker_id=TARGET_MARKER_ID, +TARGET_MARKER_TARGET: Final[Target] = ArucoOpenCVCommon.target_from_marker_parameters( + base_label=TARGET_MARKER_ID, marker_size=MARKER_SIZE_MM) DETECTOR_RED_NAME: Final[str] = "det_red" DETECTOR_RED_INTRINSICS: Final[IntrinsicParameters] = IntrinsicParameters( @@ -101,36 +96,29 @@ def assertRotationCloseToIdentity( def test_single_camera_viewing_target_marker(self): # Note that single-marker tests are particularly susceptible to reference pose ambiguity - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() + # TODO: The following line shall be replaced upon implementation of an appropriate mechanism + pose_solver._configuration.minimum_detector_count = 1 pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, intrinsic_parameters=DETECTOR_RED_INTRINSICS) 
pose_solver.add_target(target=REFERENCE_MARKER_TARGET) pose_solver.add_target(target=TARGET_MARKER_TARGET) - pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.target_id) + pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.label) # Reference is on the left, target is on the right, both in the same plane and along the x-axis of the image. pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, - detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=375, y_px=347), - MarkerCornerImagePoint(x_px=415, y_px=346), - MarkerCornerImagePoint(x_px=416, y_px=386), - MarkerCornerImagePoint(x_px=376, y_px=386)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=541, y_px=347), - MarkerCornerImagePoint(x_px=581, y_px=348), - MarkerCornerImagePoint(x_px=580, y_px=388), - MarkerCornerImagePoint(x_px=540, y_px=387)])], - rejected_marker_snapshots=list(), - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=375, y_px=347), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=415, y_px=346), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=416, y_px=386), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=376, y_px=386), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=541, y_px=347), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=581, y_px=348), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=580, y_px=388), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=540, y_px=387)], + frame_timestamp_utc=now_utc) pose_solver.update() detector_poses: list[Pose] target_poses: list[Pose] @@ -164,7 +152,7 @@ 
def test_single_camera_viewing_target_marker(self): def test_four_cameras_viewing_target_marker(self): # Note that single-marker tests are particularly susceptible to reference pose ambiguity - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, @@ -180,91 +168,55 @@ def test_four_cameras_viewing_target_marker(self): intrinsic_parameters=DETECTOR_YELLOW_INTRINSICS) pose_solver.add_target(target=REFERENCE_MARKER_TARGET) pose_solver.add_target(target=TARGET_MARKER_TARGET) - pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.target_id) + pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.label) pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, - detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=157, y_px=210), - MarkerCornerImagePoint(x_px=165, y_px=221), - MarkerCornerImagePoint(x_px=139, y_px=229), - MarkerCornerImagePoint(x_px=131, y_px=217)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=196, y_px=266), - MarkerCornerImagePoint(x_px=206, y_px=281), - MarkerCornerImagePoint(x_px=178, y_px=291), - MarkerCornerImagePoint(x_px=167, y_px=275)])], - rejected_marker_snapshots=list(), - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=157, y_px=210), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=165, y_px=221), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=139, y_px=229), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=131, y_px=217), + 
Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=196, y_px=266), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=206, y_px=281), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=178, y_px=291), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=167, y_px=275)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_SKY_NAME, - detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=190, y_px=234), - MarkerCornerImagePoint(x_px=219, y_px=246), - MarkerCornerImagePoint(x_px=195, y_px=270), - MarkerCornerImagePoint(x_px=166, y_px=257)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=317, y_px=290), - MarkerCornerImagePoint(x_px=352, y_px=306), - MarkerCornerImagePoint(x_px=332, y_px=333), - MarkerCornerImagePoint(x_px=296, y_px=317)])], - rejected_marker_snapshots=list(), - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=190, y_px=234), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=219, y_px=246), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=195, y_px=270), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=166, y_px=257), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=317, y_px=290), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=352, y_px=306), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=332, y_px=333), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=296, y_px=317)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_GREEN_NAME, - 
detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=247, y_px=304), - MarkerCornerImagePoint(x_px=283, y_px=296), - MarkerCornerImagePoint(x_px=291, y_px=326), - MarkerCornerImagePoint(x_px=254, y_px=334)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=392, y_px=277), - MarkerCornerImagePoint(x_px=426, y_px=271), - MarkerCornerImagePoint(x_px=438, y_px=299), - MarkerCornerImagePoint(x_px=403, y_px=305)])], - rejected_marker_snapshots=list(), - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=247, y_px=304), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=283, y_px=296), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=291, y_px=326), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=254, y_px=334), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=392, y_px=277), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=426, y_px=271), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=438, y_px=299), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=403, y_px=305)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_YELLOW_NAME, - detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=275, y_px=277), - MarkerCornerImagePoint(x_px=289, y_px=251), - MarkerCornerImagePoint(x_px=321, y_px=261), - MarkerCornerImagePoint(x_px=306, y_px=288)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=332, y_px=177), - 
MarkerCornerImagePoint(x_px=344, y_px=156), - MarkerCornerImagePoint(x_px=372, y_px=163), - MarkerCornerImagePoint(x_px=361, y_px=185)])], - rejected_marker_snapshots=list(), - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=275, y_px=277), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=289, y_px=251), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=321, y_px=261), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=306, y_px=288), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=332, y_px=177), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=344, y_px=156), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=372, y_px=163), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=361, y_px=185)], + frame_timestamp_utc=now_utc) pose_solver.update() detector_poses: list[Pose] target_poses: list[Pose]