From bb8f4147187fd2d5c7095845615eb53949e32226 Mon Sep 17 00:00:00 2001
From: Thomas Vaughan
Date: Fri, 16 May 2025 17:01:42 -0400
Subject: [PATCH 01/33] WIP: Extrinsic Calibration from multiple synchronized frames

---
 test/images/simulated/ideal/C01_FA0.png |   3 +
 test/images/simulated/ideal/C01_FB1.png |   3 +
 test/images/simulated/ideal/C01_FB2.png |   3 +
 test/images/simulated/ideal/C01_FB3.png |   3 +
 test/images/simulated/ideal/C01_FB4.png |   3 +
 test/images/simulated/ideal/C01_FC1.png |   3 +
 test/images/simulated/ideal/C01_FC2.png |   3 +
 test/images/simulated/ideal/C01_FC3.png |   3 +
 test/images/simulated/ideal/C01_FC4.png |   3 +
 test/images/simulated/ideal/C01_FD1.png |   3 +
 test/images/simulated/ideal/C01_FD2.png |   3 +
 test/images/simulated/ideal/C01_FD3.png |   3 +
 test/images/simulated/ideal/C01_FD4.png |   3 +
 test/images/simulated/ideal/C01_FE1.png |   3 +
 test/images/simulated/ideal/C01_FE2.png |   3 +
 test/images/simulated/ideal/C01_FE3.png |   3 +
 [... identical "| 3 +" entries for the remaining 144 simulated images, C02_FA0.png through C10_FE3.png (cameras C02-C10, same 16 frame tags per camera) ...]
 test/images/simulated/ideal/about.txt   |  13 +++
 test/test_extrinsic_calibration.py      | 105 ++++++++++++++++++++++++
 162 files changed, 598 insertions(+)
 create mode 100644 test/images/simulated/ideal/C01_FA0.png
 [... create mode 100644 lines for the remaining 159 simulated PNGs, C01_FB1.png through C10_FE3.png ...]
 create mode 100644 test/images/simulated/ideal/about.txt
 create mode 100644 test/test_extrinsic_calibration.py

diff --git a/test/images/simulated/ideal/C01_FA0.png b/test/images/simulated/ideal/C01_FA0.png
new file mode 100644
index 0000000..8dd9976
--- /dev/null
+++ b/test/images/simulated/ideal/C01_FA0.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:685c5a9a121ac1b83d1a93c1fe2da23a1b5984791458c371035b4877099998a1
+size 1422324
[... equivalent new-file hunks for C01_FB1.png through C09_FE1.png follow, each adding the same three-line Git LFS pointer (version, oid sha256:..., size of roughly 1.2-1.5 MB); the C09_FE1.png hunk is truncated after "+oid" ...]
sha256:e179f3870581d079b6a2e95f075fcff7675d344dbd91807c2bb9c8e073b1dff8 +size 1384540 diff --git a/test/images/simulated/ideal/C09_FE2.png b/test/images/simulated/ideal/C09_FE2.png new file mode 100644 index 0000000..edbd14d --- /dev/null +++ b/test/images/simulated/ideal/C09_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8f2f765e68bd862f0d12f04238c0be571d02e1d2fbff11660bc443c10444e64 +size 1464999 diff --git a/test/images/simulated/ideal/C09_FE3.png b/test/images/simulated/ideal/C09_FE3.png new file mode 100644 index 0000000..0b9fb0a --- /dev/null +++ b/test/images/simulated/ideal/C09_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2679319695762e3768b79cce49a32ef8c6eceed31f21c49712a7d6d6bae297d4 +size 1453128 diff --git a/test/images/simulated/ideal/C10_FA0.png b/test/images/simulated/ideal/C10_FA0.png new file mode 100644 index 0000000..6cdf17c --- /dev/null +++ b/test/images/simulated/ideal/C10_FA0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:436bf777b12596f68deeb0234fd7339801bc470b9078bc1b36670354a6ec6574 +size 1407422 diff --git a/test/images/simulated/ideal/C10_FB1.png b/test/images/simulated/ideal/C10_FB1.png new file mode 100644 index 0000000..afd5658 --- /dev/null +++ b/test/images/simulated/ideal/C10_FB1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8410068983f2f043c9821e61113c521ca855fb7abee255faa6843ccbfdb44dc7 +size 1469827 diff --git a/test/images/simulated/ideal/C10_FB2.png b/test/images/simulated/ideal/C10_FB2.png new file mode 100644 index 0000000..3474d4d --- /dev/null +++ b/test/images/simulated/ideal/C10_FB2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a22b6aa3297aba3a37c8fed8ac14e85e4afbc4820b6c813b531d01cc65a8862 +size 1451378 diff --git a/test/images/simulated/ideal/C10_FB3.png b/test/images/simulated/ideal/C10_FB3.png new file mode 100644 index 0000000..f5e933b --- /dev/null +++ b/test/images/simulated/ideal/C10_FB3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:542021d9d943d556138b81b51c81eeaf14af38cc5644d27bd56799f28d53a8cf +size 1469453 diff --git a/test/images/simulated/ideal/C10_FB4.png b/test/images/simulated/ideal/C10_FB4.png new file mode 100644 index 0000000..f42ce30 --- /dev/null +++ b/test/images/simulated/ideal/C10_FB4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51f96973947502a894084dde80e42455ec3e446b47075ed3281bee4a83553f03 +size 1444926 diff --git a/test/images/simulated/ideal/C10_FC1.png b/test/images/simulated/ideal/C10_FC1.png new file mode 100644 index 0000000..4f86af4 --- /dev/null +++ b/test/images/simulated/ideal/C10_FC1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daf90b7cc95f51f6b69dadcc9861d5756f2cc4ba73850bf4ec06bd9e95fe068b +size 1416813 diff --git a/test/images/simulated/ideal/C10_FC2.png b/test/images/simulated/ideal/C10_FC2.png new file mode 100644 index 0000000..cc406d2 --- /dev/null +++ b/test/images/simulated/ideal/C10_FC2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7e9b31ddd50c58bcfcb51e92b76b468984d22f7af9d5b4295226ffd9a61892b +size 1435777 diff --git a/test/images/simulated/ideal/C10_FC3.png b/test/images/simulated/ideal/C10_FC3.png new file mode 100644 index 0000000..29a7b82 --- /dev/null +++ b/test/images/simulated/ideal/C10_FC3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2650ee3240f42f18f9fb34d3fde270c88970a16d5e2146209d7a064d78e224c0 +size 1417079 diff --git a/test/images/simulated/ideal/C10_FC4.png b/test/images/simulated/ideal/C10_FC4.png new file mode 100644 index 0000000..ca2515a --- /dev/null +++ b/test/images/simulated/ideal/C10_FC4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64feb8705e54c7daf8ac9d7dcc05dda3250b37c2d3212f4bc243a5e544b104a +size 1434182 diff --git a/test/images/simulated/ideal/C10_FD1.png b/test/images/simulated/ideal/C10_FD1.png new file mode 100644 index 0000000..e103ff0 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:285f70db82065551d8c1f4e778940f1f1076d08e8dfedf59e7945ee742f619d5 +size 1394497 diff --git a/test/images/simulated/ideal/C10_FD2.png b/test/images/simulated/ideal/C10_FD2.png new file mode 100644 index 0000000..140e874 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94560ee019a6c3bc91e94cbde7da07802cb7776ccac76be08347726ee95c2de9 +size 1458368 diff --git a/test/images/simulated/ideal/C10_FD3.png b/test/images/simulated/ideal/C10_FD3.png new file mode 100644 index 0000000..8566e8b --- /dev/null +++ b/test/images/simulated/ideal/C10_FD3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b23f3090b49f5ae9c18a5f568417968c481be52ba32aa78e020c3b0ede5658b +size 1391236 diff --git a/test/images/simulated/ideal/C10_FD4.png b/test/images/simulated/ideal/C10_FD4.png new file mode 100644 index 0000000..991a809 --- /dev/null +++ b/test/images/simulated/ideal/C10_FD4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e7633d8a2370eda0ebec2d108259e3ac52507d498ff81a6e07b728e2ea00f78 +size 1452312 diff --git a/test/images/simulated/ideal/C10_FE1.png b/test/images/simulated/ideal/C10_FE1.png new file mode 100644 index 0000000..141561d --- /dev/null +++ b/test/images/simulated/ideal/C10_FE1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a689f9ffb8040ae50e6eadbc9c31b2832e2b8a5c37af3df812ab6a58af95489 +size 1389347 diff --git a/test/images/simulated/ideal/C10_FE2.png b/test/images/simulated/ideal/C10_FE2.png new file mode 100644 index 0000000..e5878f3 --- /dev/null +++ b/test/images/simulated/ideal/C10_FE2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b3dd03dc4e8a82d9627a3e0ab994895cf085ba08df60fe42eb5a64464820dbf +size 1409084 diff --git a/test/images/simulated/ideal/C10_FE3.png b/test/images/simulated/ideal/C10_FE3.png new file mode 100644 index 0000000..f6b70aa --- /dev/null +++ b/test/images/simulated/ideal/C10_FE3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bf89d858b05a6c7929f6031f4664b683554e77ae0ca0dc8121396e92e0fe96b +size 1417623 diff --git a/test/images/simulated/ideal/about.txt b/test/images/simulated/ideal/about.txt new file mode 100644 index 0000000..654ddce --- /dev/null +++ b/test/images/simulated/ideal/about.txt @@ -0,0 +1,13 @@ +This folder contains a set of simulated images in order to help test code. +Each file is named as follows: +C_F.png + +Each camera is 1 meter away from a central pivot point at the origin. +Each camera has its y axis aligned with the world's up direction (+z-axis in Blender). +Cameras 01 through 05 are all oriented 30 degrees relative to the table surface, each rotated 45 degrees around the world origin. 
+Cameras 06 through 10 are all oriented 45 degrees relative to the table surface, each rotated 45 degrees around the world origin +All cameras use the same viewing angle and image resolutions. + +Frames from the A and B sets will have the ChArUco board visible. +Frames from the C and D sets will have the ChArUco board at least partially visible to most cameras but not all. +Frames from the E set are designed to be visible only to specific cameras. diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py new file mode 100644 index 0000000..599c369 --- /dev/null +++ b/test/test_extrinsic_calibration.py @@ -0,0 +1,105 @@ +import cv2 +import numpy +import os +import re +from src.common import ImageCoding, StatusMessageSource +from src.common.structures import \ + CORNER_REFINEMENT_METHOD_SUBPIX, \ + ImageResolution, \ + IntrinsicCalibration, \ + KeyValueSimpleAny, \ + KeyValueSimpleString, \ + MarkerSnapshot +from src.detector import Calibrator +from src.detector.implementations.marker_aruco_opencv import ArucoOpenCVMarker +from src.detector.structures import CalibratorConfiguration +from src.detector.util import KEY_CORNER_REFINEMENT_METHOD +from tempfile import TemporaryDirectory +from typing import Final +import unittest + + +IMAGE_CONTENT_PATTERN: Final[re.Pattern] = re.compile(r"C([a-zA-Z0-9]+)_F([a-zA-Z0-9]+).png") +IMAGE_CONTENT_MATCH_INDEX_CAMERA: Final[int] = 1 +IMAGE_CONTENT_MATCH_INDEX_FRAME: Final[int] = 2 +IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) +MARKER_DETECTION_PARAMETERS: list[KeyValueSimpleAny] = [ + KeyValueSimpleString( + key=KEY_CORNER_REFINEMENT_METHOD, + value=CORNER_REFINEMENT_METHOD_SUBPIX)] + + +class TestPoseSolver(unittest.TestCase): + def test(self): + status_message_source: StatusMessageSource = StatusMessageSource( + source_label="test", + send_to_logger=True) # Python built-in logger + + # Organize ourselves with respect to the input data + image_location: str = os.path.join("images", "simulated", "ideal") + image_contents: list[str] = os.listdir(image_location) + image_filepaths: dict[str, dict[str, str]] = dict() # Access as: images[CameraID][FrameID] + for image_content in image_contents: + if image_content == "about.txt": + continue + + image_filepath: str = os.path.join(image_location, image_content) + if not os.path.isfile(image_filepath): + continue + + match: re.Match = IMAGE_CONTENT_PATTERN.match(image_content) + if match is None: + self.fail( + f"The input filename {image_content} did not match the expected pattern. " + "Were files moved or added?") + + camera_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_CAMERA) + frame_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_FRAME) + if camera_id not in image_filepaths: + image_filepaths[camera_id] = dict() + image_filepaths[camera_id][frame_id] = image_filepath + image_count: int = sum(len(image_filepaths[camera_id]) for camera_id in image_filepaths.keys()) + message = f"Found {image_count} image files." + status_message_source.enqueue_status_message(severity="info", message=message) + + # All cameras have the same imaging parameters. + # To simplify our lives and ensure a reasonable result, + # we'll calibrate all of the cameras with the same set of input images. + # We'll use all images from the A# and B# sets of frames. 
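The camera layout described in about.txt fully determines the ground-truth extrinsics of the simulated rig, which is what this test is ultimately meant to recover. The sketch below is not code from this patch; it reconstructs those camera-to-world matrices under assumed conventions: Blender/OpenGL-style camera axes (x right, y up, the camera looking along its local -z), world +z up, and azimuth steps of 45 degrees with cameras 01 and 06 starting on the +x axis. The starting azimuth in particular is a guess, so only relative poses should be compared against a calibration result.

    import numpy

    def simulated_camera_to_world(camera_index: int) -> numpy.ndarray:
        """Assumed ground-truth camera-to-world matrix for cameras 1 through 10 (hypothetical helper)."""
        elevation_radians = numpy.radians(30.0 if camera_index <= 5 else 45.0)
        azimuth_radians = numpy.radians(45.0 * ((camera_index - 1) % 5))  # starting azimuth is a guess
        radius_m = 1.0  # "Each camera is 1 meter away from a central pivot point at the origin."
        position = radius_m * numpy.array([
            numpy.cos(elevation_radians) * numpy.cos(azimuth_radians),
            numpy.cos(elevation_radians) * numpy.sin(azimuth_radians),
            numpy.sin(elevation_radians)])
        backward = position / numpy.linalg.norm(position)   # camera looks at the origin along its local -z
        right = numpy.cross([0.0, 0.0, 1.0], backward)      # keeps the camera y axis aligned with world up
        right /= numpy.linalg.norm(right)
        up = numpy.cross(backward, right)
        camera_to_world = numpy.identity(4)
        camera_to_world[0:3, 0] = right
        camera_to_world[0:3, 1] = up
        camera_to_world[0:3, 2] = backward
        camera_to_world[0:3, 3] = position
        return camera_to_world

Pairwise relative transforms between such matrices give the expected detector-to-detector extrinsics regardless of which camera a solver picks as its reference frame.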
+ calibration_result: IntrinsicCalibration | None = None + with TemporaryDirectory() as temppath: + calibrator: Calibrator = Calibrator( + configuration=CalibratorConfiguration(data_path=temppath), + status_message_source=status_message_source) + for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): + for frame_id, image_filepath in image_filepaths_by_frame_id.items(): + if not frame_id.startswith("A") and not frame_id.startswith("B"): + continue + image: numpy.ndarray = cv2.imread(image_filepath) + image_base64: str = ImageCoding.image_to_base64(image) + calibrator.add_image(image_base64) + _, calibration_result = calibrator.calculate( + image_resolution=IMAGE_RESOLUTION, + marker_parameters=MARKER_DETECTION_PARAMETERS) + + print(calibration_result.model_dump()) + return + + marker: ArucoOpenCVMarker = ArucoOpenCVMarker( + configuration={"method": "aruco_opencv"}, + status_message_source=status_message_source) + marker.set_parameters(parameters=MARKER_DETECTION_PARAMETERS) + image_marker_snapshots: dict[str, dict[str, list[MarkerSnapshot]]] = dict() + detection_count: int = 0 + for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): + for frame_id, image_filepath in image_filepaths_by_frame_id.items(): + if camera_id not in image_marker_snapshots: + image_marker_snapshots[camera_id] = dict() + image: numpy.ndarray = cv2.imread(image_filepath) + marker.update(image) + marker_snapshots: list[MarkerSnapshot] = marker.get_markers_detected() + image_marker_snapshots[camera_id][frame_id] = marker_snapshots + detection_count += len(marker_snapshots) + message = f"{detection_count} detections." + status_message_source.enqueue_status_message(severity="info", message=message) + print(message) From f8b6be8e8de83b06c81ed57d2ef8a02b137c9ea9 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Fri, 27 Jun 2025 14:39:19 -0400 Subject: [PATCH 02/33] MNT: Update OpenCV 4.11 (appears to be more robust for extrinsic calibration) --- requirements.txt | 2 +- src/common/structures/charuco_board_specification.py | 11 +++++------ src/detector/calibrator.py | 4 ++-- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/requirements.txt b/requirements.txt index ca402d9..f9ed91d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ fastapi hjson numpy~=1.26 numpy-stl~=3.1 -opencv-contrib-python==4.5.5.64 +opencv-contrib-python~=4.11 pydantic>=2 PyOpenGL==3.1.7 scipy diff --git a/src/common/structures/charuco_board_specification.py b/src/common/structures/charuco_board_specification.py index a8593bd..6c224a5 100644 --- a/src/common/structures/charuco_board_specification.py +++ b/src/common/structures/charuco_board_specification.py @@ -28,12 +28,11 @@ def size_mm(self) -> Tuple[float, float]: return board_size_x_mm, board_size_y_mm def create_board(self) -> Any: # type cv2.aruco.CharucoBoard - charuco_board = cv2.aruco.CharucoBoard_create( - self.square_count_x, - self.square_count_y, - self.square_size_px, - self.marker_size_px, - self.aruco_dictionary()) + charuco_board = cv2.aruco.CharucoBoard( + size=(self.square_count_x, self.square_count_y), + squareLength=self.square_size_px, + markerLength=self.marker_size_px, + dictionary=self.aruco_dictionary()) return charuco_board def get_marker_center_points(self) -> list[list[float]]: diff --git a/src/detector/calibrator.py b/src/detector/calibrator.py index bc661bc..2a4fa27 100644 --- a/src/detector/calibrator.py +++ b/src/detector/calibrator.py @@ -112,7 +112,7 @@ def calculate( raise MCTDetectorRuntimeError( 
message=f"No images for given resolution {str(image_resolution)} found.") - aruco_detector_parameters: ... = cv2.aruco.DetectorParameters_create() + aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() mismatched_keys: list[str] = assign_key_value_list_to_aruco_detection_parameters( detection_parameters=aruco_detector_parameters, key_value_list=marker_parameters) @@ -165,7 +165,7 @@ def calculate( board=charuco_board, ) # Algorithm requires a minimum of 4 markers - if len(frame_charuco_corners) >= 4: + if frame_charuco_corners is not None and len(frame_charuco_corners) >= 4: all_charuco_corners.append(frame_charuco_corners) all_charuco_ids.append(frame_charuco_ids) From 0eb1602bb028121236d1f62a7bc3d0a10d17f3b8 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 9 Jul 2025 12:50:27 -0400 Subject: [PATCH 03/33] WIP: Replacement of deprecated functions, consolidation of math logic --- src/board_builder/board_builder.py | 10 +- src/board_builder/structures/pose_location.py | 2 +- .../test/accuracy/accuracy_test.py | 4 +- src/board_builder/test/graph_search_test.py | 2 +- .../test/repeatability/repeatability_test.py | 5 +- .../utils/board_builder_pose_solver.py | 29 +- src/common/mct_component.py | 7 +- src/common/status_message_source.py | 2 +- src/common/structures/__init__.py | 7 +- .../{matrix4x4.py => linear_algebra.py} | 55 ++- src/common/structures/pose.py | 10 - src/common/structures/pose_solver_frame.py | 2 +- src/common/util/__init__.py | 1 - src/common/util/math_utils.py | 440 +++++++++++++++++- .../util/register_corresponding_points.py | 70 --- src/controller/mct_controller.py | 6 +- src/controller/structures/connection.py | 8 +- src/detector/calibrator.py | 9 +- .../camera_opencv_capture_device.py | 4 +- .../implementations/camera_picamera2.py | 2 +- .../implementations/marker_aruco_opencv.py | 4 +- .../structures/calibration_image_metadata.py | 2 +- .../structures/calibration_result_metadata.py | 2 +- src/gui/panels/board_builder_panel.py | 13 +- src/gui/panels/calibrator_panel.py | 4 +- src/gui/panels/pose_solver_panel.py | 2 +- .../panels/specialized/graphics_renderer.py | 12 +- src/pose_solver/pose_solver.py | 52 +-- src/pose_solver/structures.py | 24 +- src/pose_solver/util/__init__.py | 11 - src/pose_solver/util/average_quaternion.py | 17 - src/pose_solver/util/average_vector.py | 17 - src/pose_solver/util/closest_point_on_ray.py | 30 -- .../util/convex_quadrilateral_area.py | 64 --- .../util/iterative_closest_point.py | 235 ---------- src/pose_solver/util/line_intersection.py | 111 ----- test/test_extrinsic_calibration.py | 19 +- ...esponding_points.py => test_math_utils.py} | 115 ++++- test/test_pose_solver.py | 9 +- 39 files changed, 692 insertions(+), 726 deletions(-) rename src/common/structures/{matrix4x4.py => linear_algebra.py} (60%) delete mode 100644 src/common/structures/pose.py delete mode 100644 src/common/util/register_corresponding_points.py delete mode 100644 src/pose_solver/util/__init__.py delete mode 100644 src/pose_solver/util/average_quaternion.py delete mode 100644 src/pose_solver/util/average_vector.py delete mode 100644 src/pose_solver/util/closest_point_on_ray.py delete mode 100644 src/pose_solver/util/convex_quadrilateral_area.py delete mode 100644 src/pose_solver/util/iterative_closest_point.py delete mode 100644 src/pose_solver/util/line_intersection.py rename test/{test_register_corresponding_points.py => test_math_utils.py} (53%) diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 
653f867..1ae08d0 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -146,7 +146,7 @@ def _find_matrix_input_index(self, pose_uuid, other_pose_uuid): def _solve_pose(self, detector_data: dict[str, list[MarkerSnapshot]], timestamp: datetime.datetime): """ Given marker ids and its corner locations, find its pose """ - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: for marker_snapshot in detector_data[detector_name]: if marker_snapshot.label not in list(self._index_to_marker_id.values()): @@ -202,7 +202,7 @@ def _write_corners_dict_to_repeatability_test_file(self, corners_dict): @staticmethod def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerSnapshot]], data_description: str): formatted_data = {} - timestamp = datetime.datetime.utcnow().isoformat() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() for detector_name, snapshots in detector_data.items(): formatted_data[detector_name] = [] for snapshot in snapshots: @@ -246,7 +246,7 @@ def locate_reference_board(self, detector_data: dict[str, list[MarkerSnapshot]]) if all(isinstance(v, list) and len(v) == 0 for v in detector_data.values()): return self.detector_poses = [] - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: for marker_snapshot in detector_data[detector_name]: corners_list: list[list[float]] = [] @@ -281,7 +281,7 @@ def collect_data(self, detector_data: dict[str, list[MarkerSnapshot]]): detector_data = self._filter_markers_appearing_in_multiple_detectors(detector_data) if all(isinstance(v, list) and len(v) == 0 for v in detector_data.values()): return - timestamp = datetime.datetime.utcnow() + timestamp = datetime.datetime.now(tz=datetime.timezone.utc) corners_dict = {} self.target_poses = [] self._solve_pose(detector_data, timestamp) @@ -349,7 +349,7 @@ def build_board(self): pose = Pose( target_id=marker_id, object_to_reference_matrix=Matrix4x4.from_numpy_array(np.array(T)), - solver_timestamp_utc_iso8601=str(datetime.datetime.utcnow())) + solver_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) self.target_poses.append(pose) corners = self._calculate_corners_location(T, self.local_corners) predicted_corners[marker_id] = corners diff --git a/src/board_builder/structures/pose_location.py b/src/board_builder/structures/pose_location.py index 8132de6..c0af262 100644 --- a/src/board_builder/structures/pose_location.py +++ b/src/board_builder/structures/pose_location.py @@ -4,7 +4,7 @@ from numpy._typing import NDArray from typing import Any from scipy.spatial.transform import Rotation as R -from src.pose_solver.util.average_quaternion import average_quaternion +from src.common.util.average_quaternion import average_quaternion from src.common.structures import Matrix4x4, Pose diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index 7061a04..f9c9a5a 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -6,7 +6,7 @@ from structures import AccuracyTestParameters from src.board_builder.board_builder import BoardBuilder -from src.common.util import register_corresponding_points +from src.common.util import MathUtils from src.common.structures import \ MarkerCornerImagePoint, \ MarkerSnapshot, \ @@ -72,7 +72,7 @@ def 
transform_point(point, matrix): simulated_points: list[list[float]] = simulated_board.get_points() # Get the transformation matrix - transformation_matrix = register_corresponding_points( + transformation_matrix = MathUtils.register_corresponding_points( point_set_from=target_points, point_set_to=simulated_points, use_oomori_mirror_fix=False) diff --git a/src/board_builder/test/graph_search_test.py b/src/board_builder/test/graph_search_test.py index 233f563..3af6ccf 100644 --- a/src/board_builder/test/graph_search_test.py +++ b/src/board_builder/test/graph_search_test.py @@ -50,7 +50,7 @@ ]).as_numpy_array() -timestamp = str(datetime.datetime.utcnow()) +timestamp = datetime.datetime.now(tz=datetime.timezone.utc) poseLocation_01 = PoseLocation("01") poseLocation_01.frame_count += 60 diff --git a/src/board_builder/test/repeatability/repeatability_test.py b/src/board_builder/test/repeatability/repeatability_test.py index c3c692c..13534f3 100644 --- a/src/board_builder/test/repeatability/repeatability_test.py +++ b/src/board_builder/test/repeatability/repeatability_test.py @@ -1,7 +1,7 @@ import json import os import numpy as np -from src.common.util import register_corresponding_points +from src.common.util import MathUtils def transform_point(point, matrix): """Applies a 4x4 transformation matrix to a 3D point.""" @@ -39,7 +39,8 @@ def register_all_runs(data): # Check if the lengths of the point sets differ if len(current_corners) == len(reference_corners): # Register the entire set of points in the current run to the reference run - transformation_matrix = register_corresponding_points(current_corners.tolist(), reference_corners.tolist()) + transformation_matrix = MathUtils.register_corresponding_points( + current_corners.tolist(), reference_corners.tolist()) # Apply the transformation to the current run registered_run = {} diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index 7fd64ad..48325ea 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -5,22 +5,15 @@ from src.common.structures import \ CharucoBoardSpecification, \ IntrinsicParameters, \ + IterativeClosestPointParameters, \ MarkerCorners, \ Matrix4x4, \ Pose, \ + Ray, \ TargetBase, \ TargetMarker from src.common.util import MathUtils -from src.common.util import register_corresponding_points -from src.pose_solver.structures import \ - Ray, \ - PoseSolverParameters -from src.pose_solver.util import \ - average_quaternion, \ - convex_quadrilateral_area, \ - closest_intersection_between_n_lines, \ - IterativeClosestPointParameters, \ - iterative_closest_point_for_points_and_rays +from src.pose_solver.structures import PoseSolverParameters import cv2 import cv2.aruco import datetime @@ -233,7 +226,7 @@ def get_detector_poses( Pose( target_id=detector_label, object_to_reference_matrix=pose, - solver_timestamp_utc_iso8601=str(datetime.datetime.utcnow().isoformat())) + solver_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) for detector_label, pose in self._poses_by_detector_label.items()] return detector_poses @@ -535,7 +528,7 @@ def _estimate_target_pose_relative_to_reference(self): # After a certain number of intersections, # there may be little point in processing additional (lower precision) ray sets. 
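The sort that follows uses MathUtils.convex_quadrilateral_area as a cheap proxy for how well a detector saw the marker: a larger projected quadrilateral generally means a closer, better-resolved view, so those ray sets get priority and only the first MAXIMUM_RAY_COUNT_FOR_INTERSECTION of them are kept. A small standalone sketch of that ranking, with made-up pixel coordinates in clockwise corner order:

    from src.common.util import MathUtils

    image_quads: dict[str, list[list[float]]] = {
        "near_marker": [[100.0, 100.0], [300.0, 100.0], [300.0, 300.0], [100.0, 300.0]],
        "far_marker": [[500.0, 200.0], [550.0, 200.0], [550.0, 250.0], [500.0, 250.0]]}
    ranked_labels = sorted(
        image_quads,
        key=lambda label: MathUtils.convex_quadrilateral_area(image_quads[label]),
        reverse=True)
    print(ranked_labels)  # ['near_marker', 'far_marker']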
for marker_id, ray_set_list in ray_sets_by_marker_id.items(): - ray_set_list.sort(key=lambda x: convex_quadrilateral_area(x.image_points), reverse=True) + ray_set_list.sort(key=lambda x: MathUtils.convex_quadrilateral_area(x.image_points), reverse=True) ray_sets_by_marker_id[marker_id] = ray_set_list[0:self._parameters.MAXIMUM_RAY_COUNT_FOR_INTERSECTION] marker_count_by_marker_id: dict[str, int] = dict() @@ -567,7 +560,7 @@ def _estimate_target_pose_relative_to_reference(self): rays.append(Ray( source_point=ray_set.ray_origin_reference, direction=ray_set.ray_directions_reference[corner_index])) - intersection_result = closest_intersection_between_n_lines( + intersection_result = MathUtils.closest_intersection_between_n_lines( rays=rays, maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) if intersection_result.centroids.shape[0] == 0: @@ -657,7 +650,7 @@ def _estimate_target_pose_relative_to_reference(self): object_points_for_intersections = self._corresponding_point_list_in_target(target_id=target_id) object_known_points += object_points_for_intersections reference_known_points += reference_points_for_intersections - initial_object_to_reference_matrix = register_corresponding_points( + initial_object_to_reference_matrix = MathUtils.register_corresponding_points( point_set_from=object_points_for_intersections, point_set_to=reference_points_for_intersections) initial_object_to_reference_estimated = True @@ -687,17 +680,17 @@ def _estimate_target_pose_relative_to_reference(self): mean_position += position mean_position /= len(estimated_positions) initial_object_to_reference_matrix[0:3, 3] = mean_position - mean_orientation = average_quaternion(estimated_orientations) + mean_orientation = MathUtils.average_quaternion(estimated_orientations) initial_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(mean_orientation).as_matrix() - icp_output = iterative_closest_point_for_points_and_rays( + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( source_known_points=object_known_points, target_known_points=reference_known_points, source_ray_points=object_ray_points, target_rays=reference_rays, initial_transformation_matrix=initial_object_to_reference_matrix, parameters=iterative_closest_point_parameters) - object_to_reference_matrix = icp_output.source_to_target_matrix + object_to_reference_matrix = icp_output.source_to_target_matrix.as_numpy_array() # Compute a depth from each detector, # find newest ray_set for each detector @@ -753,7 +746,7 @@ def _estimate_target_pose_relative_to_reference(self): self._poses_by_target_id[target_id] = pose def _update(self): - now_timestamp = datetime.datetime.utcnow() + now_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) self._now_timestamp = now_timestamp poses_need_update: bool = self._clear_old_values(now_timestamp) poses_need_update |= len(self._marker_corners_since_update) > 0 diff --git a/src/common/mct_component.py b/src/common/mct_component.py index bc512b7..490a639 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -115,7 +115,7 @@ def timestamp_get(self, **kwargs) -> TimestampGetResponse: kwargs=kwargs, key="request", arg_type=TimestampGetRequest) - timestamp_utc_iso8601 : str = datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601 : str = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() return TimestampGetResponse( requester_timestamp_utc_iso8601=request.requester_timestamp_utc_iso8601, responder_timestamp_utc_iso8601=timestamp_utc_iso8601) @@ -143,12 
+143,12 @@ async def websocket_handler(self, websocket: WebSocket) -> None: supported_types=list(self.supported_request_types().keys())) except MCTParsingError as e: logger.exception(str(e)) - await websocket.send_json(MCTResponseSeries(requests_parsed=False).dict()) + await websocket.send_json(MCTResponseSeries().model_dump()) continue response_series: MCTResponseSeries = self.websocket_handle_requests( client_identifier=client_identifier, request_series=MCTRequestSeries(series=request_series_list)) - await websocket.send_json(response_series.dict()) + await websocket.send_json(response_series.model_dump()) except WebSocketDisconnect as e: print(f"DISCONNECTED: {str(e)}") logger.info(str(e)) @@ -180,5 +180,4 @@ def websocket_handle_requests( self.add_status_message(severity="error", message=message) response_series.append(ErrorResponse(message=message)) return MCTResponseSeries( - requests_parsed=True, series=response_series) diff --git a/src/common/status_message_source.py b/src/common/status_message_source.py index 928a70d..dbb38ce 100644 --- a/src/common/status_message_source.py +++ b/src/common/status_message_source.py @@ -56,7 +56,7 @@ def enqueue_status_message( if not source_label: source_label = self._source_label if not timestamp_utc_iso8601: - timestamp_utc_iso8601 = datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601 = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() elif isinstance(timestamp_utc_iso8601, datetime.datetime): timestamp_utc_iso8601 = timestamp_utc_iso8601.isoformat() message: StatusMessage = StatusMessage( diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index f721c23..cc51110 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -32,13 +32,16 @@ KeyValueMetaFloat, \ KeyValueMetaInt, \ key_value_meta_to_simple +from .linear_algebra import \ + IterativeClosestPointParameters, \ + Matrix4x4, \ + Pose, \ + Ray from .marker_corner_image_point import MarkerCornerImagePoint from .marker_corners import MarkerCorners from .marker_definition import MarkerDefinition from .marker_snapshot import MarkerSnapshot -from .matrix4x4 import Matrix4x4 from .mct_parsable import MCTParsable -from .pose import Pose from .pose_solver_frame import PoseSolverFrame from .pose_solver_status import PoseSolverStatus from .status_message import \ diff --git a/src/common/structures/matrix4x4.py b/src/common/structures/linear_algebra.py similarity index 60% rename from src/common/structures/matrix4x4.py rename to src/common/structures/linear_algebra.py index 84faa8f..7925e6e 100644 --- a/src/common/structures/matrix4x4.py +++ b/src/common/structures/linear_algebra.py @@ -1,16 +1,35 @@ import numpy from pydantic import BaseModel, Field +from typing import Final -def _identity_values() -> list[float]: - return \ - [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] +_DEFAULT_EPSILON: Final[float] = 0.0001 + + +class IterativeClosestPointParameters(BaseModel): + # ICP will stop after this many iterations + termination_iteration_count: int = Field() + + # ICP will stop if distance *and* angle difference from one iteration to the next + # is smaller than these + termination_delta_translation: float = Field() + termination_delta_rotation_radians: float = Field() + + # ICP will stop if overall point-to-point distance (between source and target) + # mean *or* root-mean-square is less than specified + termination_mean_point_distance: float = Field() + 
termination_rms_point_distance: float = Field() # root-mean-square class Matrix4x4(BaseModel): + + @staticmethod + def _identity_values() -> list[float]: + return \ + [1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0] values: list[float] = Field(default_factory=_identity_values) def as_numpy_array(self): @@ -75,3 +94,27 @@ def from_numpy_array( if len(value_array[i]) != 4: raise ValueError(f"Expected input row {i} to have 4 col. Got {len(value_array[i])}.") return Matrix4x4(values=list(value_array.flatten())) + + +class Pose(BaseModel): + target_id: str = Field() + object_to_reference_matrix: Matrix4x4 = Field() + solver_timestamp_utc_iso8601: str = Field() + + +# TODO: Turn this into a pydantic class +class Ray: + source_point: list[float] + direction: list[float] + + def __init__( + self, + source_point: list[float], + direction: list[float], + epsilon: float = _DEFAULT_EPSILON + ): + direction_norm = numpy.linalg.norm(direction) + if direction_norm < epsilon: + raise ValueError("Direction cannot be zero.") + self.source_point = source_point + self.direction = direction diff --git a/src/common/structures/pose.py b/src/common/structures/pose.py deleted file mode 100644 index b43990b..0000000 --- a/src/common/structures/pose.py +++ /dev/null @@ -1,10 +0,0 @@ -from .matrix4x4 import Matrix4x4 -import datetime -from pydantic import BaseModel, Field -import uuid - - -class Pose(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - solver_timestamp_utc_iso8601: str = Field() diff --git a/src/common/structures/pose_solver_frame.py b/src/common/structures/pose_solver_frame.py index f17097e..54a5433 100644 --- a/src/common/structures/pose_solver_frame.py +++ b/src/common/structures/pose_solver_frame.py @@ -1,4 +1,4 @@ -from .pose import Pose +from .linear_algebra import Pose import datetime from pydantic import BaseModel, Field diff --git a/src/common/util/__init__.py b/src/common/util/__init__.py index 5d14bf2..42218d6 100644 --- a/src/common/util/__init__.py +++ b/src/common/util/__init__.py @@ -1,3 +1,2 @@ from .io_utils import IOUtils from .math_utils import MathUtils -from .register_corresponding_points import register_corresponding_points diff --git a/src/common/util/math_utils.py b/src/common/util/math_utils.py index eba1b2c..ec42c56 100644 --- a/src/common/util/math_utils.py +++ b/src/common/util/math_utils.py @@ -1,6 +1,8 @@ from ..structures import \ + IterativeClosestPointParameters, \ IntrinsicParameters, \ Matrix4x4, \ + Ray, \ TargetBase import cv2 import numpy @@ -9,6 +11,7 @@ XPointKey = TypeVar("XPointKey") +_DEFAULT_EPSILON: float = 0.0001 class MathUtils: @@ -19,6 +22,179 @@ class MathUtils: def __init__(self): raise RuntimeError("This class is not meant to be initialized.") + @staticmethod + def average_quaternion( + quaternions: list[list[float]] + ) -> list[float]: + """ + Solution based on this link: https://stackoverflow.com/a/27410865 + based on Markley et al. "Averaging quaternions." Journal of Guidance, Control, and Dynamics 30.4 (2007): 1193-1197. 
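A hedged usage sketch for the consolidated MathUtils.average_quaternion follows. Quaternions are assumed scalar-last ([x, y, z, w]), matching the scipy Rotation convention used elsewhere in this codebase; the sample angles are arbitrary.

    from scipy.spatial.transform import Rotation
    from src.common.util import MathUtils

    quaternions = [
        Rotation.from_euler("z", angle_degrees, degrees=True).as_quat().tolist()
        for angle_degrees in (9.0, 10.0, 11.0)]
    mean_quaternion = MathUtils.average_quaternion(quaternions)
    print(Rotation.from_quat(mean_quaternion).as_euler("zyx", degrees=True))  # approximately [10, 0, 0]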
+ """ + quaternion_matrix = numpy.array(quaternions, dtype="float32").transpose() # quaternions into columns + quaternion_matrix /= len(quaternions) + eigenvalues, eigenvectors = numpy.linalg.eig(numpy.matmul(quaternion_matrix, quaternion_matrix.transpose())) + maximum_eigenvalue_index = numpy.argmax(eigenvalues) + quaternion = eigenvectors[:, maximum_eigenvalue_index] + if quaternion[3] < 0: + quaternion *= -1 + return quaternion.tolist() + + @staticmethod + def average_vector( + translations: list[list[float]] + ) -> list[float]: + """ + This is a very simple function for averaging translations + when it is not desired to use numpy (for whatever reason) + """ + sum_translations: list[float] = [0.0, 0.0, 0.0] + for translation in translations: + for i in range(0, 3): + sum_translations[i] += translation[i] + translation_count = len(translations) + return [ + sum_translations[0] / translation_count, + sum_translations[1] / translation_count, + sum_translations[2] / translation_count] + + class RayIntersection2Output: + parallel: bool # special case, mark it as such + closest_point_1: numpy.ndarray + closest_point_2: numpy.ndarray + + def __init__( + self, + parallel: bool, + closest_point_1: numpy.ndarray, + closest_point_2: numpy.ndarray + ): + self.parallel = parallel + self.closest_point_1 = closest_point_1 + self.closest_point_2 = closest_point_2 + + def centroid(self) -> numpy.ndarray: + return (self.closest_point_1 + self.closest_point_2) / 2 + + def distance(self) -> float: + return numpy.linalg.norm(self.closest_point_2 - self.closest_point_1) + + @staticmethod + def closest_intersection_between_two_lines( + ray_1: Ray, + ray_2: Ray, + epsilon: float = _DEFAULT_EPSILON + ) -> RayIntersection2Output: # Returns data on intersection + ray_1_direction_normalized = ray_1.direction / numpy.linalg.norm(ray_1.direction) + ray_2_direction_normalized = ray_2.direction / numpy.linalg.norm(ray_2.direction) + + # ray 3 will be perpendicular to both rays 1 and 2, + # and will intersect with both rays at the nearest point(s) + + ray_3_direction = numpy.cross(ray_2_direction_normalized, ray_1_direction_normalized) + ray_3_direction_norm = numpy.linalg.norm(ray_3_direction) + if ray_3_direction_norm < epsilon: + return MathUtils.RayIntersection2Output( + parallel=True, + closest_point_1=ray_1.source_point, + closest_point_2=ray_2.source_point) + + # system of equations Ax = b + b = numpy.subtract(ray_2.source_point, ray_1.source_point) + a = numpy.asarray( + [ray_1_direction_normalized, -ray_2_direction_normalized, ray_3_direction], dtype="float32").transpose() + x = numpy.linalg.solve(a, b) + + param_ray_1 = float(x[0]) + intersection_point_1 = ray_1.source_point + param_ray_1 * ray_1_direction_normalized + + param_ray_2 = float(x[1]) + intersection_point_2 = ray_2.source_point + param_ray_2 * ray_2_direction_normalized + + return MathUtils.RayIntersection2Output( + parallel=False, + closest_point_1=intersection_point_1, + closest_point_2=intersection_point_2) + + class RayIntersectionNOutput: + centroids: numpy.ndarray + + # How many rays were used. + # Note that centroids might not use all possible intersections (e.g. 
parallel rays) + ray_count: int + + def __init__( + self, + centroids: numpy.ndarray, + ray_count: int + ): + self.centroids = centroids + self.ray_count = ray_count + + def centroid(self) -> numpy.ndarray: + sum_centroids = numpy.asarray([0, 0, 0], dtype="float32") + for centroid in self.centroids: + sum_centroids += centroid + return sum_centroids / self.centroids.shape[0] + + def intersection_count(self) -> int: + return int((self.ray_count * (self.ray_count - 1)) / 2) + + @staticmethod + def closest_intersection_between_n_lines( + rays: list[Ray], + maximum_distance: float + ) -> RayIntersectionNOutput: + ray_count = len(rays) + intersections: list[MathUtils.RayIntersection2Output] = list() + for ray_1_index in range(0, ray_count): + for ray_2_index in range(ray_1_index + 1, ray_count): + intersections.append(MathUtils.closest_intersection_between_two_lines( + ray_1=rays[ray_1_index], + ray_2=rays[ray_2_index])) + centroids: list[numpy.ndarray] = list() + for intersection in intersections: + if intersection.parallel: + continue + if intersection.distance() > maximum_distance: + continue + centroids.append(intersection.centroid()) + return MathUtils.RayIntersectionNOutput( + centroids=numpy.asarray(centroids, dtype="float32"), + ray_count=ray_count) + + @staticmethod + def closest_point_on_ray( + ray_source: list[float], + ray_direction: list[float], + query_point: list[float], + forward_only: bool + ): + """ + Find the closest point on a ray in 3D. + """ + # Let ray_point be the closest point between query_point and the ray. + # (ray_point - query_point) will be perpendicular to ray_direction. + # Let ray_distance be the distance along the ray where the closest point is. + # So we have two equations: + # (1) (ray_point - query_point) * ray_direction = 0 + # (2) ray_point = ray_source + ray_distance * ray_direction + # If we substitute eq (2) into (1) and solve for ray_distance, we get: + ray_distance: float = ( + (query_point[0] * ray_direction[0] + query_point[1] * ray_direction[1] + query_point[2] * ray_direction[ + 2] + - ray_source[0] * ray_direction[0] - ray_source[1] * ray_direction[1] - ray_source[2] * ray_direction[ + 2]) + / + ((ray_direction[0] ** 2) + (ray_direction[1] ** 2) + (ray_direction[2] ** 2))) + + if ray_distance < 0 and forward_only: + return ray_source # point is behind the source, so the closest point is just the source + + ray_point = [0.0] * 3 # temporary values + for i in range(0, 3): + ray_point[i] = ray_source[i] + ray_distance * ray_direction[i] + return ray_point + @staticmethod def convert_detector_points_to_vectors( points: list[list[float]], # [point_index][x/y/z] @@ -71,10 +247,60 @@ def convert_detector_corners_to_vectors( ray_vectors_by_marker_id[marker_id] = rays return ray_vectors_by_marker_id + @staticmethod + def convex_quadrilateral_area( + points: list[list[float]], # 2D points in clockwise order + epsilon: float = _DEFAULT_EPSILON + ) -> float: + """ + Compute the area of a quadrilateral, given 2D points in clockwise order. + """ + + # General approach: + # Given points a, b, c, and d shown below, + # and calculating points e and f shown below, + # add areas defined by right triangles bea, ceb, dfc, and afd + # b..................c + # . .. ... + # . ... ... . + # . .. .f. . + # . .e. ... . + # . ... .. . + # ... ... . 
+ # a...................d + + point_a = numpy.array(points[0], dtype="float32") + point_b = numpy.array(points[1], dtype="float32") + point_c = numpy.array(points[2], dtype="float32") + point_d = numpy.array(points[3], dtype="float32") + + vector_ac = point_c - point_a + vector_ac_norm = numpy.linalg.norm(vector_ac) + vector_bd = point_d - point_b + vector_bd_norm = numpy.linalg.norm(vector_bd) + if vector_ac_norm <= epsilon or vector_bd_norm <= epsilon: + return 0.0 + width_vector = vector_ac / numpy.linalg.norm(vector_ac) + height_vector = numpy.array([width_vector[1], -width_vector[0]], dtype="float32") # rotated 90 degrees + + sum_of_areas: float = 0.0 + point_pairs: list[tuple[numpy.ndarray, numpy.ndarray]] = [ + (point_a, point_b), + (point_b, point_c), + (point_c, point_d), + (point_d, point_a)] + for point_pair in point_pairs: + line_vector = point_pair[1] - point_pair[0] + width = numpy.dot(line_vector, width_vector) + height = numpy.dot(line_vector, height_vector) + sum_of_areas += numpy.abs(width * height / 2.0) + + return sum_of_areas + @staticmethod def estimate_matrix_transform_to_detector( target: TargetBase, - corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y/z] + corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y] detector_intrinsics: IntrinsicParameters ) -> Matrix4x4: target_points: list[list[float]] = list() # ordered points [point_index][x/y/z] @@ -98,6 +324,148 @@ def estimate_matrix_transform_to_detector( object_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(object_to_camera_matrix) return Matrix4x4.from_numpy_array(object_to_detector_matrix) + class IterativeClosestPointOutput: + source_to_target_matrix: Matrix4x4 + iteration_count: int + mean_point_distance: float + rms_point_distance: float + + def __init__( + self, + source_to_target_matrix: Matrix4x4, + iteration_count: int, + mean_point_distance: float, + rms_point_distance: float + ): + self.source_to_target_matrix = source_to_target_matrix + self.iteration_count = iteration_count + self.mean_point_distance = mean_point_distance + self.rms_point_distance = rms_point_distance + + @staticmethod + def iterative_closest_point_for_points_and_rays( + source_known_points: list[list[float]], + target_known_points: list[list[float]], + source_ray_points: list[list[float]], + target_rays: list[Ray], + parameters: IterativeClosestPointParameters = None, + initial_transformation_matrix: numpy.ndarray = None + ) -> IterativeClosestPointOutput: + """ + Algorithm is based on ICP: Besl and McKay. Method for registration of 3-D shapes. 1992. + This is customized, adapted to the problem of registering a set of points to + a set of points and rays where the correspondence is known. 
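A hedged usage sketch of the calling convention for this points-and-rays ICP variant: three known point correspondences fix most of the transform, and one additional source point is constrained only to lie somewhere along a ray expressed in the target frame. The values below are arbitrary and chosen so that the consistent answer is a pure translation of +1 along z.

    from src.common.structures import IterativeClosestPointParameters, Ray
    from src.common.util import MathUtils

    source_known = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    target_known = [[0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]  # source shifted by +1 in z
    source_ray_points = [[0.0, 0.0, 2.0]]
    target_rays = [Ray(source_point=[0.0, 0.0, 3.0], direction=[1.0, 0.0, 0.0])]  # consistent with the same shift
    parameters = IterativeClosestPointParameters(
        termination_iteration_count=50,
        termination_delta_translation=0.001,
        termination_delta_rotation_radians=0.001,
        termination_mean_point_distance=0.001,
        termination_rms_point_distance=0.001)
    icp_output = MathUtils.iterative_closest_point_for_points_and_rays(
        source_known_points=source_known,
        target_known_points=target_known,
        source_ray_points=source_ray_points,
        target_rays=target_rays,
        parameters=parameters)
    print(icp_output.source_to_target_matrix.as_numpy_array())  # approximately identity rotation, translation [0, 0, 1]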
+ :param source_known_points: points with known corresponding positions in both source and target coordinate frames + :param target_known_points: points with known corresponding positions in both source and target coordinate frames + :param source_ray_points: points with known position in the source coordinate frame, but NOT in target + :param target_rays: rays along which the remaining target points lie (1:1 correspondence with source_ray_points) + :param parameters: + :param initial_transformation_matrix: + """ + + def _transform_points( + original_points: list[list[float]], + transformation: numpy.ndarray + ): + transformed_points: list[list[float]] = list() + for point in original_points: + transformed_point = list(numpy.matmul( + transformation, + numpy.array([point[0], point[1], point[2], 1]))) + transformed_points.append([transformed_point[0], transformed_point[1], transformed_point[2]]) + return transformed_points + + if len(source_known_points) != len(target_known_points): + raise ValueError( + "source_known_points and target_known_points must be of equal length (1:1 correspondence).") + + if len(source_known_points) != len(target_known_points): + raise ValueError("source_ray_points and target_rays must be of equal length (1:1 correspondence).") + + # Initial transformation + source_to_transformed_matrix: numpy.ndarray + if initial_transformation_matrix is not None: + source_to_transformed_matrix = numpy.array(initial_transformation_matrix, dtype="float32") + else: + source_to_transformed_matrix = numpy.identity(4, dtype="float32") + + if parameters is None: + parameters = IterativeClosestPointParameters( + termination_iteration_count=50, + termination_delta_translation=0.1, + termination_delta_rotation_radians=0.001, + termination_mean_point_distance=0.1, + termination_rms_point_distance=0.1) + + transformed_known_points: list[list[float]] = _transform_points( + original_points=source_known_points, + transformation=source_to_transformed_matrix) + transformed_ray_points: list[list[float]] = _transform_points( + original_points=source_ray_points, + transformation=source_to_transformed_matrix) + + iteration_count: int = 0 + mean_point_distance: float + rms_point_distance: float + while True: + target_ray_points: list[list[float]] = list() + for i, transformed_ray_point in enumerate(transformed_ray_points): + target_ray_points.append(MathUtils.closest_point_on_ray( + ray_source=target_rays[i].source_point, + ray_direction=target_rays[i].direction, + query_point=transformed_ray_point, + forward_only=True)) + + transformed_all_points = transformed_known_points + transformed_ray_points + target_points = target_known_points + target_ray_points + transformed_to_target_matrix = MathUtils.register_corresponding_points( + point_set_from=transformed_all_points, + point_set_to=target_points, + collinearity_do_check=False) + + # update transformation & transformed points + source_to_transformed_matrix = numpy.matmul(transformed_to_target_matrix, source_to_transformed_matrix) + transformed_known_points: list[list[float]] = _transform_points( + original_points=source_known_points, + transformation=source_to_transformed_matrix) + transformed_ray_points: list[list[float]] = _transform_points( + original_points=source_ray_points, + transformation=source_to_transformed_matrix) + + iteration_count += 1 + + transformed_all_points = transformed_known_points + transformed_ray_points + point_offsets = numpy.subtract(target_points, transformed_all_points).tolist() + sum_point_distances = 0.0 + 
sum_square_point_distances = 0.0 + for delta_point_offset in point_offsets: + delta_point_distance: float = numpy.linalg.norm(delta_point_offset) + sum_point_distances += delta_point_distance + sum_square_point_distances += numpy.square(delta_point_distance) + mean_point_distance = sum_point_distances / len(point_offsets) + rms_point_distance = numpy.sqrt(sum_square_point_distances / len(point_offsets)) + + # Check if termination criteria are met + # Note that transformed_to_target_matrix describes the change since last iteration, so we often operate on it + delta_translation = numpy.linalg.norm(transformed_to_target_matrix[0:3, 3]) + delta_rotation_radians = \ + numpy.linalg.norm(Rotation.from_matrix(transformed_to_target_matrix[0:3, 0:3]).as_rotvec()) + if delta_translation < parameters.termination_delta_translation and \ + delta_rotation_radians < parameters.termination_delta_rotation_radians: + break + if mean_point_distance < parameters.termination_mean_point_distance: + break + if rms_point_distance < parameters.termination_rms_point_distance: + break + if iteration_count >= parameters.termination_iteration_count: + break + + return MathUtils.IterativeClosestPointOutput( + source_to_target_matrix=Matrix4x4.from_numpy_array(source_to_transformed_matrix), + iteration_count=iteration_count, + mean_point_distance=mean_point_distance, + rms_point_distance=rms_point_distance) + @staticmethod def image_to_opengl_transformation_matrix( transformation_matrix_image: numpy.ndarray @@ -123,3 +491,73 @@ def image_to_opengl_vector( # transformation_matrix_180[1, 1] *= -1 # transformation_matrix_180[2, 2] *= -1 # return numpy.matmul(transformation_matrix_180, vector_image) + + @staticmethod + def register_corresponding_points( + point_set_from: list[list[float]], + point_set_to: list[list[float]], + collinearity_do_check: bool = True, + collinearity_zero_threshold: float = 0.0001, + use_oomori_mirror_fix: bool = True + ) -> numpy.array: # 4x4 transformation matrix, indexed by [row,col] + """ + Solution based on: Arun et al. Least square fitting of two 3D point sets (1987) + https://stackoverflow.com/questions/66923224/rigid-registration-of-two-point-clouds-with-known-correspondence + Use mirroring solution proposed by Oomori et al. + Oomori et al. Point cloud matching using singular value decomposition. (2016) + :param point_set_from: + :param point_set_to: + :param collinearity_do_check: Do a (naive) collinearity check. May be computationally expensive. + :param collinearity_zero_threshold: Threshold considered zero for cross product and norm comparisons + :param use_oomori_mirror_fix: Use the mirroring solution proposed in Oomori et al 2016. 
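A hedged usage sketch for register_corresponding_points: with noise-free, non-collinear correspondences the Arun/Oomori solution recovers the rigid transform exactly, up to floating-point precision. The rotation and translation below are arbitrary test values.

    import numpy
    from scipy.spatial.transform import Rotation
    from src.common.util import MathUtils

    true_matrix = numpy.identity(4)
    true_matrix[0:3, 0:3] = Rotation.from_euler("xyz", [10.0, 20.0, 30.0], degrees=True).as_matrix()
    true_matrix[0:3, 3] = [5.0, -2.0, 1.0]

    points_from = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    points_to = [list(numpy.matmul(true_matrix, point + [1.0])[0:3]) for point in points_from]

    estimated_matrix = MathUtils.register_corresponding_points(
        point_set_from=points_from,
        point_set_to=points_to)
    print(numpy.allclose(estimated_matrix, true_matrix, atol=1e-4))  # True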
+ """ + if len(point_set_from) != len(point_set_to): + raise ValueError("Input point sets must be of identical length.") + if len(point_set_from) < 3: + raise ValueError("Input point sets must be of length 3 or higher.") + if collinearity_do_check: + for point_set in (point_set_from, point_set_to): + collinear = True # assume true until shown otherwise + p1: numpy.ndarray = numpy.asarray(point_set[0]) + vec1: numpy.ndarray + i: int = 1 + for i in range(i, len(point_set)): + p2: numpy.ndarray = numpy.asarray(point_set[1]) + vec1 = p2 - p1 + vec1_length: float = numpy.linalg.norm(vec1) + if vec1_length > collinearity_zero_threshold: + break # points are distinct, move to next phase + for i in range(i, len(point_set)): + p3: numpy.ndarray = numpy.asarray(point_set[2]) + vec2: numpy.ndarray = p3 - p1 + cross_product_norm: float = numpy.linalg.norm(numpy.cross(vec1, vec2)) + if cross_product_norm > collinearity_zero_threshold: + collinear = False + break + if collinear: + raise ValueError("Input points appear to be collinear - please check the input.") + + # for consistency, points are in rows + point_count = len(point_set_from) + sums_from = numpy.array([0, 0, 0], dtype="float32") + sums_to = numpy.array([0, 0, 0], dtype="float32") + for point_index in range(0, point_count): + sums_from += numpy.array(point_set_from[point_index]) + sums_to += numpy.array(point_set_to[point_index]) + centroid_from = (sums_from / point_count) + centroid_to = (sums_to / point_count) + points_from = numpy.array(point_set_from) + points_to = numpy.array(point_set_to) + centered_points_from = points_from - numpy.hstack(centroid_from) + centered_points_to = points_to - numpy.hstack(centroid_to) + covariance = numpy.matmul(centered_points_from.T, centered_points_to) + u, _, vh = numpy.linalg.svd(covariance) + s = numpy.identity(3, dtype="float32") # s will be the Oomori mirror fix + if use_oomori_mirror_fix: + s[2, 2] = numpy.linalg.det(numpy.matmul(u, vh)) + rotation = numpy.matmul(u, numpy.matmul(s, vh)).transpose() + translation = centroid_to - numpy.matmul(rotation, centroid_from) + matrix = numpy.identity(4, dtype="float32") + matrix[0:3, 0:3] = rotation + matrix[0:3, 3] = translation[0:3].reshape(3) + return matrix diff --git a/src/common/util/register_corresponding_points.py b/src/common/util/register_corresponding_points.py deleted file mode 100644 index d26545e..0000000 --- a/src/common/util/register_corresponding_points.py +++ /dev/null @@ -1,70 +0,0 @@ -# Solution based on: Arun et al. Least square fitting of two 3D point sets (1987) -# https://stackoverflow.com/questions/66923224/rigid-registration-of-two-point-clouds-with-known-correspondence -# Use mirroring solution proposed by Oomori et al. -# Oomori et al. Point cloud matching using singular value decomposition. (2016) -import numpy - - -def register_corresponding_points( - point_set_from: list[list[float]], - point_set_to: list[list[float]], - collinearity_do_check: bool = True, - collinearity_zero_threshold: float = 0.0001, - use_oomori_mirror_fix: bool = True -) -> numpy.array: # 4x4 transformation matrix, indexed by [row,col] - """ - :param point_set_from: - :param point_set_to: - :param collinearity_do_check: Do a (naive) collinearity check. May be computationally expensive. 
- :param collinearity_zero_threshold: Threshold considered zero for cross product and norm comparisons - """ - if len(point_set_from) != len(point_set_to): - raise ValueError("Input point sets must be of identical length.") - if len(point_set_from) < 3: - raise ValueError("Input point sets must be of length 3 or higher.") - if collinearity_do_check: - for point_set in (point_set_from, point_set_to): - collinear = True # assume true until shown otherwise - p1: numpy.ndarray = numpy.asarray(point_set[0]) - vec1: numpy.ndarray - i: int = 1 - for i in range(i, len(point_set)): - p2: numpy.ndarray = numpy.asarray(point_set[1]) - vec1 = p2 - p1 - vec1_length: float = numpy.linalg.norm(vec1) - if vec1_length > collinearity_zero_threshold: - break # points are distinct, move to next phase - for i in range(i, len(point_set)): - p3: numpy.ndarray = numpy.asarray(point_set[2]) - vec2: numpy.ndarray = p3 - p1 - cross_product_norm: float = numpy.linalg.norm(numpy.cross(vec1, vec2)) - if cross_product_norm > collinearity_zero_threshold: - collinear = False - break - if collinear: - raise ValueError("Input points appear to be collinear - please check the input.") - - # for consistency, points are in rows - point_count = len(point_set_from) - sums_from = numpy.array([0, 0, 0], dtype="float32") - sums_to = numpy.array([0, 0, 0], dtype="float32") - for point_index in range(0, point_count): - sums_from += numpy.array(point_set_from[point_index]) - sums_to += numpy.array(point_set_to[point_index]) - centroid_from = (sums_from / point_count) - centroid_to = (sums_to / point_count) - points_from = numpy.array(point_set_from) - points_to = numpy.array(point_set_to) - centered_points_from = points_from - numpy.hstack(centroid_from) - centered_points_to = points_to - numpy.hstack(centroid_to) - covariance = numpy.matmul(centered_points_from.T, centered_points_to) - u, _, vh = numpy.linalg.svd(covariance) - s = numpy.identity(3, dtype="float32") # s will be the Oomori mirror fix - if use_oomori_mirror_fix: - s[2, 2] = numpy.linalg.det(numpy.matmul(u, vh)) - rotation = numpy.matmul(u, numpy.matmul(s, vh)).transpose() - translation = centroid_to - numpy.matmul(rotation, centroid_from) - matrix = numpy.identity(4, dtype="float32") - matrix[0:3, 0:3] = rotation - matrix[0:3, 3] = translation[0:3].reshape(3) - return matrix diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index b3782a9..51aa797 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -196,7 +196,7 @@ def _advance_startup_state(self) -> None: message="TIME_SYNC complete") component_labels: list[str] = self.get_component_labels(active=True) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - TimestampGetRequest(requester_timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat())]) + TimestampGetRequest(requester_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat())]) for component_label in component_labels: self._pending_request_ids.append( self.request_series_push( @@ -455,7 +455,7 @@ def handle_response_get_poses( pose_solver_connection.detector_poses = response.detector_poses pose_solver_connection.target_poses = response.target_poses pose_solver_connection.poses_timestamp = ( - datetime.datetime.utcnow() - # TODO: This should come from the pose solver + datetime.datetime.now(tz=datetime.timezone.utc) - # TODO: This should come from the pose solver datetime.timedelta(seconds=pose_solver_connection.controller_offset_seconds)) def 
handle_response_timestamp_get( @@ -466,7 +466,7 @@ def handle_response_timestamp_get( connection: Connection = self._get_connection( connection_label=component_label, connection_type=Connection) - utc_now: datetime.datetime = datetime.datetime.utcnow() + utc_now: datetime.datetime = datetime.datetime.now(tz=datetime.timezone.utc) requester_timestamp: datetime.datetime requester_timestamp = datetime.datetime.fromisoformat(response.requester_timestamp_utc_iso8601) round_trip_seconds: float = (utc_now - requester_timestamp).total_seconds() diff --git a/src/controller/structures/connection.py b/src/controller/structures/connection.py index 0bf73bf..0dd90d9 100644 --- a/src/controller/structures/connection.py +++ b/src/controller/structures/connection.py @@ -174,7 +174,7 @@ def enqueue_status_message( source_label=self._component_address.label, severity=severity, message=message, - timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat())) + timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat())) def get_current_state(self) -> str: return self._state @@ -312,7 +312,7 @@ def start_up(self) -> None: f"Current state: {self._state}") self._state = Connection.State.CONNECTING self._attempt_count = 0 - self._next_attempt_timestamp_utc = datetime.datetime.utcnow() + self._next_attempt_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) @abc.abstractmethod def supported_response_types(self) -> list[type[MCTResponse]]: @@ -404,7 +404,7 @@ def _update_initialization_result(self) -> InitializationResult: return Connection.InitializationResult.IN_PROGRESS def _update_in_connecting_state(self) -> None: - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(tz=datetime.timezone.utc) if now_utc >= self._next_attempt_timestamp_utc: self._attempt_count += 1 connection_result: Connection.ConnectionResult = self._try_connect() @@ -461,7 +461,7 @@ def _update_in_normal_disconnecting_state(self) -> None: self._state = Connection.State.INACTIVE def _update_in_reconnecting_state(self) -> None: - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(tz=datetime.timezone.utc) if now_utc >= self._next_attempt_timestamp_utc: connection_result: Connection.ConnectionResult = self._try_connect() if connection_result.success: diff --git a/src/detector/calibrator.py b/src/detector/calibrator.py index 2a4fa27..94d7939 100644 --- a/src/detector/calibrator.py +++ b/src/detector/calibrator.py @@ -106,6 +106,9 @@ def calculate( image_resolution: ImageResolution, marker_parameters: list[KeyValueSimpleAny] ) -> tuple[str, IntrinsicCalibration]: + """ + :returns: a tuple containing a result identifier (GUID as string) and the IntrinsicCalibration structure + """ calibration_key: ImageResolution = image_resolution if calibration_key not in self._calibration_map: @@ -197,7 +200,7 @@ def calculate( # Note too that there is an unchecked expectation that radial distortion be monotonic. 
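# --- Illustrative sketch, not part of this patch ---
# The note above mentions an unchecked expectation that radial distortion be monotonic.
# A minimal check could sample the Brown-Conrady radial mapping and verify it never
# decreases; the helper name, the sampling range, and the use of only k1/k2/k3 are
# hypothetical choices, not something this patch provides.
import numpy

def radial_distortion_is_monotonic(k1: float, k2: float, k3: float, r_max: float = 1.0) -> bool:
    # r_distorted(r) = r * (1 + k1*r^2 + k2*r^4 + k3*r^6) on normalized image coordinates
    r = numpy.linspace(0.0, r_max, num=1000)
    r_distorted = r * (1.0 + k1 * r ** 2 + k2 * r ** 4 + k3 * r ** 6)
    return bool(numpy.all(numpy.diff(r_distorted) >= 0.0))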
intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration( - timestamp_utc=str(datetime.datetime.utcnow()), + timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), image_resolution=image_resolution, reprojection_error=charuco_overall_reprojection_error, calibrated_values=IntrinsicParameters( @@ -242,7 +245,7 @@ def calculate( result_identifier=result_identifier) IOUtils.json_write( filepath=result_filepath, - json_dict=intrinsic_calibration.dict(), + json_dict=intrinsic_calibration.model_dump(), on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( severity="error", message=msg), @@ -552,7 +555,7 @@ def _result_filepath( def save(self) -> None: IOUtils.json_write( filepath=self._map_filepath(), - json_dict=CalibrationMap.from_dict(self._calibration_map).dict(), + json_dict=CalibrationMap.from_dict(self._calibration_map).model_dump(), on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( severity="error", message=msg), diff --git a/src/detector/implementations/camera_opencv_capture_device.py b/src/detector/implementations/camera_opencv_capture_device.py index 587061c..59639ce 100644 --- a/src/detector/implementations/camera_opencv_capture_device.py +++ b/src/detector/implementations/camera_opencv_capture_device.py @@ -259,7 +259,7 @@ def start(self) -> None: # NOTE: The USB3 cameras bought for this project appear to require some basic parameters to be set, # otherwise frame grab results in error default_resolution: ImageResolution = ImageResolution.from_str(_CAMERA_RESOLUTION_DEFAULT) - self._capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')) + self._capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')) self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, float(default_resolution.x_px)) self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, float(default_resolution.y_px)) self._capture.set(cv2.CAP_PROP_FPS, float(_CAMERA_FPS_DEFAULT)) @@ -300,4 +300,4 @@ def update(self) -> None: self.set_status(CameraStatus.FAILURE) raise MCTDetectorRuntimeError(message=message) - self._image_timestamp_utc = datetime.datetime.utcnow() + self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/camera_picamera2.py b/src/detector/implementations/camera_picamera2.py index 52773ff..09ca6c9 100644 --- a/src/detector/implementations/camera_picamera2.py +++ b/src/detector/implementations/camera_picamera2.py @@ -277,4 +277,4 @@ def update(self) -> None: self.set_status(CameraStatus.FAILURE) raise MCTDetectorRuntimeError(message=message) - self._image_timestamp_utc = datetime.datetime.utcnow() + self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/marker_aruco_opencv.py b/src/detector/implementations/marker_aruco_opencv.py index 61c2693..c08d5ca 100644 --- a/src/detector/implementations/marker_aruco_opencv.py +++ b/src/detector/implementations/marker_aruco_opencv.py @@ -45,7 +45,7 @@ def __init__( status_message_source=status_message_source) self._marker_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - self._marker_parameters = cv2.aruco.DetectorParameters_create() + self._marker_parameters = cv2.aruco.DetectorParameters() self._marker_label_reverse_dictionary = dict() self._marker_detected_snapshots = list() # Markers that are determined to be valid, and are identified self._marker_rejected_snapshots = list() # Things that looked at first like markers 
but got later filtered out @@ -147,4 +147,4 @@ def update( label=f"unknown", corner_image_points=corner_image_points)) - self._marker_timestamp_utc = datetime.datetime.utcnow() + self._marker_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/structures/calibration_image_metadata.py b/src/detector/structures/calibration_image_metadata.py index dc524a7..d3c2a04 100644 --- a/src/detector/structures/calibration_image_metadata.py +++ b/src/detector/structures/calibration_image_metadata.py @@ -6,5 +6,5 @@ class CalibrationImageMetadata(BaseModel): identifier: str = Field() label: str = Field(default_factory=str) - timestamp_utc: str = Field(default_factory=lambda: str(datetime.datetime.utcnow())) + timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: CalibrationImageState = Field(default=CalibrationImageState.SELECT) diff --git a/src/detector/structures/calibration_result_metadata.py b/src/detector/structures/calibration_result_metadata.py index 605b989..cef3265 100644 --- a/src/detector/structures/calibration_result_metadata.py +++ b/src/detector/structures/calibration_result_metadata.py @@ -6,7 +6,7 @@ class CalibrationResultMetadata(BaseModel): identifier: str = Field() label: str = Field(default_factory=str) - timestamp_utc_iso8601: str = Field(default_factory=lambda: str(datetime.datetime.utcnow())) + timestamp_utc_iso8601: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) image_identifiers: list[str] = Field(default_factory=list) state: CalibrationResultState = Field(default=CalibrationResultState.RETAIN) diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index 021c64f..3dae97a 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -7,9 +7,7 @@ import numpy import wx import wx.grid -from cv2 import aruco import datetime -import json import os from src.common.api.empty_response import EmptyResponse @@ -23,7 +21,8 @@ from src.common.structures.detector_frame import DetectorFrame from src.common.structures.image_resolution import ImageResolution from src.common.structures.marker_snapshot import MarkerSnapshot -from src.detector.api import CameraImageGetRequest, CalibrationResultGetActiveRequest, \ +from src.detector.api import \ + CameraImageGetRequest, \ CalibrationResultGetActiveResponse from src.detector.api import CameraImageGetResponse from src.gui.panels.detector_panel import _CAPTURE_FORMAT @@ -56,9 +55,9 @@ class LiveDetectorPreview(BasePanel): image: str def __init__( - self, - detector_label: str, - image_panel: ImagePanel + self, + detector_label: str, + image_panel: ImagePanel ): self.detector_label = detector_label self.image_panel = image_panel @@ -583,7 +582,7 @@ def _render_frame(self, detector_poses, target_poses): pose_solver_frame = PoseSolverFrame( detector_poses=detector_poses, target_poses=target_poses, - timestamp_utc_iso8601=datetime.datetime.utcnow().isoformat() + timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat() ) ### RENDERER ### diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index bb6b928..67651f9 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -336,7 +336,7 @@ def _handle_response_calibrate( self._calibrate_status_textbox.SetValue( f"Calibration {response.result_identifier} complete - values: " 
f"{str(response.intrinsic_calibration.calibrated_values.as_array())}") - self._result_display_textbox.SetValue(response.intrinsic_calibration.json(indent=4)) + self._result_display_textbox.SetValue(response.intrinsic_calibration.model_dump_json(indent=4)) self._calibration_in_progress = False self._force_last_result_selected = True @@ -359,7 +359,7 @@ def _handle_response_get_calibration_result( self, response: CalibrationResultGetResponse ) -> None: - self._result_display_textbox.SetValue(str(response.intrinsic_calibration.json(indent=4))) + self._result_display_textbox.SetValue(str(response.intrinsic_calibration.model_dump_json(indent=4))) def _handle_response_list_calibration_detector_resolutions( self, diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index e07b06e..a8355cf 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -180,7 +180,7 @@ def on_tracking_row_selected(self, _event: wx.grid.GridEvent) -> None: selected_index: int | None = self._tracking_table.get_selected_row_index() if selected_index is not None: if 0 <= selected_index < len(self._tracked_target_poses): - display_text: str = self._tracked_target_poses[selected_index].json(indent=4) + display_text: str = self._tracked_target_poses[selected_index].model_dump_json(indent=4) self._tracking_display_textbox.SetValue(display_text) else: self.status_message_source.enqueue_status_message( diff --git a/src/gui/panels/specialized/graphics_renderer.py b/src/gui/panels/specialized/graphics_renderer.py index 9ca7390..f5a126b 100644 --- a/src/gui/panels/specialized/graphics_renderer.py +++ b/src/gui/panels/specialized/graphics_renderer.py @@ -129,8 +129,8 @@ def __init__( handler=self._on_resize) self._context = GLContext(win=self) - self._last_render_datetime_utc = datetime.datetime.utcnow() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_render_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) + self._last_update_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) self._shaders = set() self._model_dictionary = dict() self._scene_objects = list() @@ -243,10 +243,10 @@ def render(self): scene_object.model.draw(translation=translation, rotation_quaternion=list(rotation_quaternion)) self.SwapBuffers() - self._last_render_datetime_utc = datetime.datetime.utcnow() + self._last_render_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) def _compute_seconds_since_last_update(self) -> float: - return (datetime.datetime.utcnow() - self._last_update_datetime_utc).total_seconds() + return (datetime.datetime.now(tz=datetime.timezone.utc) - self._last_update_datetime_utc).total_seconds() @staticmethod def _compute_mouse_deltas( @@ -337,7 +337,7 @@ def _on_mouse_moved(self, event: wx.MouseEvent): numpy.array(self._perspective_target) + delta_y_millimeters * translation_y_vector) self._update_world_to_view() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_update_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) def _on_mouse_wheel(self, event: wx.MouseEvent): delta_time_seconds: float = self._compute_seconds_since_last_update() @@ -358,7 +358,7 @@ def _on_mouse_wheel(self, event: wx.MouseEvent): numpy.array(self._perspective_target) - delta_z_millimeters * translation_z_vector) self._update_world_to_view() - self._last_update_datetime_utc = datetime.datetime.utcnow() + self._last_update_datetime_utc = datetime.datetime.now(tz=datetime.timezone.utc) def 
_on_resize(self, _evt: wx.SizeEvent): self._update_viewport() diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 4635b7d..60372bb 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -3,23 +3,16 @@ from .structures import \ DetectorRecord, \ DetectorFrameRecord, \ - Ray, \ PoseSolverParameters -from .util import \ - average_quaternion, \ - average_vector, \ - closest_intersection_between_n_lines, \ - IterativeClosestPointParameters, \ - iterative_closest_point_for_points_and_rays from src.common.structures import \ DetectorFrame, \ IntrinsicParameters, \ + IterativeClosestPointParameters, \ Matrix4x4, \ Pose, \ + Ray, \ TargetBase -from src.common.util import \ - MathUtils, \ - register_corresponding_points +from src.common.util import MathUtils import cv2 import cv2.aruco import datetime @@ -65,8 +58,8 @@ class PoseSolver: def __init__( self ): - self._last_change_timestamp_utc = datetime.datetime.min - self._last_updated_timestamp_utc = datetime.datetime.min + self._last_change_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + self._last_updated_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._parameters = PoseSolverParameters() self._intrinsics_by_detector_label = dict() @@ -97,7 +90,7 @@ def add_detector_frame( self._detector_records_by_detector_label[detector_label] = DetectorRecord() self._detector_records_by_detector_label[detector_label].clear_frame_records() self._detector_records_by_detector_label[detector_label].add_frame_record(detector_frame_record) - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def add_target( self, @@ -118,20 +111,20 @@ def add_target( self._targets.append(target) for marker_id in marker_ids: self._marker_target_map[marker_id] = self._targets[target_index] - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def clear_extrinsic_matrices(self): self._extrinsics_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def clear_intrinsic_parameters(self): self._intrinsics_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def clear_targets(self): self._targets.clear() self._marker_target_map.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def get_poses( self @@ -162,7 +155,7 @@ def set_extrinsic_matrix( transform_to_reference: Matrix4x4 ) -> None: self._extrinsics_by_detector_label[detector_label] = transform_to_reference - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def set_intrinsic_parameters( self, @@ -170,7 +163,7 @@ def set_intrinsic_parameters( intrinsic_parameters: IntrinsicParameters ) -> None: self._intrinsics_by_detector_label[detector_label] = intrinsic_parameters - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def set_reference_target( self, @@ -180,7 +173,7 @@ def set_reference_target( for 
target_index, target in enumerate(self._targets): if target.target_id == target_id: self._targets[0], self._targets[target_index] = self._targets[target_index], self._targets[0] - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) found = True break if not found: @@ -193,7 +186,7 @@ def set_targets( self._targets = targets self._poses_by_target_id.clear() self._poses_by_detector_label.clear() - self._last_change_timestamp_utc = datetime.datetime.utcnow() + self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def _calculate_reprojection_error_for_pose( self, @@ -309,8 +302,8 @@ def _denoise_detector_to_reference_pose( for pose in poses_to_average] orientations = [list(Rotation.from_matrix(pose.object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) for pose in poses_to_average] - filtered_translation = average_vector(translations) - filtered_orientation = average_quaternion(orientations) + filtered_translation = MathUtils.average_vector(translations) + filtered_orientation = MathUtils.average_quaternion(orientations) filtered_object_to_reference_matrix = numpy.identity(4, dtype="float32") filtered_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(filtered_orientation).as_matrix() filtered_object_to_reference_matrix[0:3, 3] = filtered_translation @@ -326,7 +319,7 @@ def update(self) -> None: if len(self._targets) == 0: return - self._last_updated_timestamp_utc = datetime.datetime.utcnow() + self._last_updated_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) self._poses_by_detector_label.clear() self._poses_by_target_id.clear() @@ -393,7 +386,7 @@ def update(self) -> None: intersections_appear_valid: bool = True # If something looks off, set this to False corners_reference_by_corner_index: list[list[float]] = list() for corner_index in range(0, _CORNER_COUNT): - intersection_result = closest_intersection_between_n_lines( + intersection_result = MathUtils.closest_intersection_between_n_lines( rays=ray_list_by_corner_index[corner_index], maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) if intersection_result.centroids.shape[0] == 0: @@ -464,11 +457,11 @@ def update(self) -> None: termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) if len(marker_ids_with_intersections) >= 1: - initial_detected_to_reference_matrix = register_corresponding_points( + initial_detected_to_reference_matrix = MathUtils.register_corresponding_points( point_set_from=detected_known_points, point_set_to=reference_known_points, collinearity_do_check=False) - icp_output = iterative_closest_point_for_points_and_rays( + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( source_known_points=detected_known_points, target_known_points=reference_known_points, source_ray_points=detected_ray_points, @@ -476,11 +469,10 @@ def update(self) -> None: initial_transformation_matrix=initial_detected_to_reference_matrix, parameters=iterative_closest_point_parameters) else: - icp_output = iterative_closest_point_for_points_and_rays( + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( source_known_points=detected_known_points, target_known_points=reference_known_points, source_ray_points=detected_ray_points, target_rays=reference_rays, parameters=iterative_closest_point_parameters) - detected_to_reference = 
icp_output.source_to_target_matrix - self._poses_by_target_id[target.target_id] = Matrix4x4.from_numpy_array(detected_to_reference) + self._poses_by_target_id[target.target_id] = icp_output.source_to_target_matrix diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py index 9004252..8626072 100644 --- a/src/pose_solver/structures.py +++ b/src/pose_solver/structures.py @@ -1,14 +1,8 @@ from src.common.structures import \ - DetectorFrame, \ - Matrix4x4 + DetectorFrame import cv2.aruco import datetime -import numpy from pydantic import BaseModel, Field -from typing import Final - - -EPSILON: Final[float] = 0.0001 class DetectorFrameRecord: @@ -151,19 +145,3 @@ class PoseSolverParameters(BaseModel): # SOLVEPNP_P3P appears to return nan's on rare occasion # SOLVEPNP_SQPNP appears to return nan's on rare occasion # SOLVEPNP_IPPE_SQUARE does not seem to work very well at all, translation is much smaller than expected - - -class Ray: - source_point: list[float] - direction: list[float] - - def __init__( - self, - source_point: list[float], - direction: list[float] - ): - direction_norm = numpy.linalg.norm(direction) - if direction_norm < EPSILON: - raise ValueError("Direction cannot be zero.") - self.source_point = source_point - self.direction = direction diff --git a/src/pose_solver/util/__init__.py b/src/pose_solver/util/__init__.py deleted file mode 100644 index b800e4b..0000000 --- a/src/pose_solver/util/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .average_quaternion import average_quaternion -from .average_vector import average_vector -from .closest_point_on_ray import closest_point_on_ray -from .convex_quadrilateral_area import convex_quadrilateral_area -from .iterative_closest_point import \ - IterativeClosestPointParameters, \ - IterativeClosestPointOutput, \ - iterative_closest_point_for_points_and_rays -from .line_intersection import \ - closest_intersection_between_two_lines, \ - closest_intersection_between_n_lines diff --git a/src/pose_solver/util/average_quaternion.py b/src/pose_solver/util/average_quaternion.py deleted file mode 100644 index ae3d7cf..0000000 --- a/src/pose_solver/util/average_quaternion.py +++ /dev/null @@ -1,17 +0,0 @@ -# Solution based on this link: https://stackoverflow.com/a/27410865 -# based on Markley et al. "Averaging quaternions." Journal of Guidance, Control, and Dynamics 30.4 (2007): 1193-1197. 
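# --- Hedged cross-check sketch, not part of this patch ---
# The eigenvector-based quaternion mean below (Markley et al.) can be sanity-checked
# against scipy, whose Rotation.mean() computes a chordal L2 mean of the same kind.
# Quaternions are scalar-last [x, y, z, w], matching the code below; the sample
# rotations are made up for illustration.
import numpy
from scipy.spatial.transform import Rotation

quaternions = [
    list(Rotation.from_euler(seq="z", angles=10, degrees=True).as_quat()),
    list(Rotation.from_euler(seq="z", angles=30, degrees=True).as_quat())]
scipy_mean = Rotation.from_quat(numpy.asarray(quaternions)).mean().as_quat()
# average_quaternion(quaternions) is expected to agree with scipy_mean up to sign.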
-import numpy -from typing import List - - -def average_quaternion( - quaternions: List[List[float]] -) -> List[float]: - quaternion_matrix = numpy.array(quaternions, dtype="float32").transpose() # quaternions into columns - quaternion_matrix /= len(quaternions) - eigenvalues, eigenvectors = numpy.linalg.eig(numpy.matmul(quaternion_matrix, quaternion_matrix.transpose())) - maximum_eigenvalue_index = numpy.argmax(eigenvalues) - quaternion = eigenvectors[:, maximum_eigenvalue_index] - if quaternion[3] < 0: - quaternion *= -1 - return quaternion.tolist() diff --git a/src/pose_solver/util/average_vector.py b/src/pose_solver/util/average_vector.py deleted file mode 100644 index b61be16..0000000 --- a/src/pose_solver/util/average_vector.py +++ /dev/null @@ -1,17 +0,0 @@ -# This is a very simple function for averaging translations -# when it is not desired to use numpy (for whatever reason) -from typing import List - - -def average_vector( - translations: List[List[float]] -) -> List[float]: - sum_translations: List[float] = [0.0, 0.0, 0.0] - for translation in translations: - for i in range(0, 3): - sum_translations[i] += translation[i] - translation_count = len(translations) - return [ - sum_translations[0] / translation_count, - sum_translations[1] / translation_count, - sum_translations[2] / translation_count] diff --git a/src/pose_solver/util/closest_point_on_ray.py b/src/pose_solver/util/closest_point_on_ray.py deleted file mode 100644 index 94da76b..0000000 --- a/src/pose_solver/util/closest_point_on_ray.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import List - - -# Find the closest point on a ray in 3D -def closest_point_on_ray( - ray_source: List[float], - ray_direction: List[float], - query_point: List[float], - forward_only: bool -): - # Let ray_point be the closest point between query_point and the ray. - # (ray_point - query_point) will be perpendicular to ray_direction. - # Let ray_distance be the distance along the ray where the closest point is. - # So we have two equations: - # (1) (ray_point - query_point) * ray_direction = 0 - # (2) ray_point = ray_source + ray_distance * ray_direction - # If we substitute eq (2) into (1) and solve for ray_distance, we get: - ray_distance: float = ( - (query_point[0] * ray_direction[0] + query_point[1] * ray_direction[1] + query_point[2] * ray_direction[2] - - ray_source[0] * ray_direction[0] - ray_source[1] * ray_direction[1] - ray_source[2] * ray_direction[2]) - / - ((ray_direction[0] ** 2) + (ray_direction[1] ** 2) + (ray_direction[2] ** 2))) - - if ray_distance < 0 and forward_only: - return ray_source # point is behind the source, so the closest point is just the source - - ray_point = [0.0] * 3 # temporary values - for i in range(0, 3): - ray_point[i] = ray_source[i] + ray_distance * ray_direction[i] - return ray_point diff --git a/src/pose_solver/util/convex_quadrilateral_area.py b/src/pose_solver/util/convex_quadrilateral_area.py deleted file mode 100644 index 5b1b747..0000000 --- a/src/pose_solver/util/convex_quadrilateral_area.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy -from typing import Final - -EPSILON: Final[float] = 0.0001 - - -# General approach: -# Given points a, b, c, and d shown below, -# and calculating points e and f shown below, -# add areas defined by right triangles bea, ceb, dfc, and afd -# b..................c -# . .. ... -# . ... ... . -# . .. .f. . -# . .e. ... . -# . ... .. . -# ... ... . 
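# --- Hedged cross-check sketch, not part of this patch ---
# The triangle-decomposition approach described above can be cross-checked with the
# shoelace formula, which gives the area of any simple polygon directly:
def shoelace_area(points: list[list[float]]) -> float:
    # points: [x, y] vertices in order (clockwise or counter-clockwise)
    doubled_area = 0.0
    for index in range(len(points)):
        x0, y0 = points[index]
        x1, y1 = points[(index + 1) % len(points)]
        doubled_area += x0 * y1 - x1 * y0
    return abs(doubled_area) / 2.0

# For the test points used further below ([1,3], [2,5], [5,3], [3,2]) this yields 6.0,
# matching the expected area in the unit test.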
-# a...................d -def convex_quadrilateral_area( - points: list[list[float]] # 2D points in clockwise order -) -> float: - point_a = numpy.array(points[0], dtype="float32") - point_b = numpy.array(points[1], dtype="float32") - point_c = numpy.array(points[2], dtype="float32") - point_d = numpy.array(points[3], dtype="float32") - - vector_ac = point_c - point_a - vector_ac_norm = numpy.linalg.norm(vector_ac) - vector_bd = point_d - point_b - vector_bd_norm = numpy.linalg.norm(vector_bd) - if vector_ac_norm <= EPSILON or vector_bd_norm <= EPSILON: - return 0.0 - width_vector = vector_ac / numpy.linalg.norm(vector_ac) - height_vector = numpy.array([width_vector[1], -width_vector[0]], dtype="float32") # rotated 90 degrees - - sum_of_areas: float = 0.0 - point_pairs: list[tuple[numpy.ndarray, numpy.ndarray]] = [ - (point_a, point_b), - (point_b, point_c), - (point_c, point_d), - (point_d, point_a)] - for point_pair in point_pairs: - line_vector = point_pair[1] - point_pair[0] - width = numpy.dot(line_vector, width_vector) - height = numpy.dot(line_vector, height_vector) - sum_of_areas += numpy.abs(width * height / 2.0) - - return sum_of_areas - - -def test(): - points = [ - [1.0, 3.0], - [2.0, 5.0], - [5.0, 3.0], - [3.0, 2.0]] - area = 6.0 - assert abs(convex_quadrilateral_area(points) - area) <= EPSILON - assert abs(convex_quadrilateral_area([points[3]] + points[0:3]) - area) <= EPSILON - print("Success") - - -if __name__ == "__main__": - test() diff --git a/src/pose_solver/util/iterative_closest_point.py b/src/pose_solver/util/iterative_closest_point.py deleted file mode 100644 index d17626d..0000000 --- a/src/pose_solver/util/iterative_closest_point.py +++ /dev/null @@ -1,235 +0,0 @@ -# Algorithm is based on ICP: Besl and McKay. Method for registration of 3-D shapes. 1992. 
-import datetime # for testing, not needed for the algorithm itself -import numpy -from scipy.spatial.transform import Rotation -from src.pose_solver.structures import Ray -from .closest_point_on_ray import closest_point_on_ray -from src.common.util import register_corresponding_points - - -class IterativeClosestPointParameters: - # ICP will stop after this many iterations - termination_iteration_count: int - - # ICP will stop if distance *and* angle difference from one iteration to the next - # is smaller than these - termination_delta_translation: float - termination_delta_rotation_radians: float - - # ICP will stop if overall point-to-point distance (between source and target) - # mean *or* root-mean-square is less than specified - termination_mean_point_distance: float - termination_rms_point_distance: float # root-mean-square - - def __init__( - self, - termination_iteration_count: int, - termination_delta_translation: float, - termination_delta_rotation_radians: float, - termination_mean_point_distance: float, - termination_rms_point_distance: float - ): - self.termination_iteration_count = termination_iteration_count - self.termination_delta_translation = termination_delta_translation - self.termination_delta_rotation_radians = termination_delta_rotation_radians - self.termination_mean_point_distance = termination_mean_point_distance - self.termination_rms_point_distance = termination_rms_point_distance - - -class IterativeClosestPointOutput: - source_to_target_matrix: numpy.ndarray - iteration_count: int - mean_point_distance: float - rms_point_distance: float - - def __init__( - self, - source_to_target_matrix: numpy.ndarray, - iteration_count: int, - mean_point_distance: float, - rms_point_distance: float - ): - self.source_to_target_matrix = source_to_target_matrix - self.iteration_count = iteration_count - self.mean_point_distance = mean_point_distance - self.rms_point_distance = rms_point_distance - - -def _calculate_transformed_points( - original_points: list[list[float]], - transformation: numpy.ndarray -): - transformed_points: list[list[float]] = list() - for point in original_points: - transformed_point = list(numpy.matmul( - transformation, - numpy.array([point[0], point[1], point[2], 1]))) - transformed_points.append([transformed_point[0], transformed_point[1], transformed_point[2]]) - return transformed_points - - -# This is a customized implementation for Iterative Closest Point -# adapted to the problem of registering a set of points to -# a set of points and rays where the correspondence is known. 
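# --- Rough usage sketch, not part of this patch ---
# In the consolidated layout introduced by this patch series the routine lives on
# MathUtils, with Ray and IterativeClosestPointParameters coming from
# src.common.structures; the specific points and rays below are made up for illustration.
from src.common.structures import IterativeClosestPointParameters, Ray
from src.common.util import MathUtils

icp_output = MathUtils.iterative_closest_point_for_points_and_rays(
    source_known_points=[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],  # 1:1 correspondences
    target_known_points=[[0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]],
    source_ray_points=[[1.0, 1.0, 0.0]],  # position known only in the source frame
    target_rays=[Ray(source_point=[0.0, 0.0, 0.0], direction=[1.0, 1.0, 1.0])],  # constraint in the target frame
    parameters=IterativeClosestPointParameters(
        termination_iteration_count=100,
        termination_delta_translation=0.001,
        termination_delta_rotation_radians=0.001,
        termination_mean_point_distance=0.0001,
        termination_rms_point_distance=0.0001))
# icp_output.source_to_target_matrix is a Matrix4x4 (use .as_numpy_array() for numpy);
# iteration_count and the mean/rms point distances describe how the loop terminated.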
-def iterative_closest_point_for_points_and_rays( - source_known_points: list[list[float]], - target_known_points: list[list[float]], - source_ray_points: list[list[float]], - target_rays: list[Ray], - parameters: IterativeClosestPointParameters = None, - initial_transformation_matrix: numpy.ndarray = None -) -> IterativeClosestPointOutput: - """ - :param source_known_points: points with known corresponding positions in both source and target coordinate frames - :param target_known_points: points with known corresponding positions in both source and target coordinate frames - :param source_ray_points: points with known position in the source coordinate frame, but NOT in target - :param target_rays: rays along which the remaining target points lie (1:1 correspondence with source_ray_points) - :param parameters: - :param initial_transformation_matrix: - """ - - if len(source_known_points) != len(target_known_points): - raise ValueError("source_known_points and target_known_points must be of equal length (1:1 correspondence).") - - if len(source_known_points) != len(target_known_points): - raise ValueError("source_ray_points and target_rays must be of equal length (1:1 correspondence).") - - # Initial transformation - source_to_transformed_matrix: numpy.ndarray - if initial_transformation_matrix is not None: - source_to_transformed_matrix = numpy.array(initial_transformation_matrix, dtype="float32") - else: - source_to_transformed_matrix = numpy.identity(4, dtype="float32") - - if parameters is None: - parameters = IterativeClosestPointParameters( - termination_iteration_count=50, - termination_delta_translation=0.1, - termination_delta_rotation_radians=0.001, - termination_mean_point_distance=0.1, - termination_rms_point_distance=0.1) - - transformed_known_points: list[list[float]] = _calculate_transformed_points( - original_points=source_known_points, - transformation=source_to_transformed_matrix) - transformed_ray_points: list[list[float]] = _calculate_transformed_points( - original_points=source_ray_points, - transformation=source_to_transformed_matrix) - - iteration_count: int = 0 - mean_point_distance: float - rms_point_distance: float - while True: - target_ray_points: list[list[float]] = list() - for i, transformed_ray_point in enumerate(transformed_ray_points): - target_ray_points.append(closest_point_on_ray( - ray_source=target_rays[i].source_point, - ray_direction=target_rays[i].direction, - query_point=transformed_ray_point, - forward_only=True)) - - transformed_points = transformed_known_points + transformed_ray_points - target_points = target_known_points + target_ray_points - transformed_to_target_matrix = register_corresponding_points( - point_set_from=transformed_points, - point_set_to=target_points, - collinearity_do_check=False) - - # update transformation & transformed points - source_to_transformed_matrix = numpy.matmul(transformed_to_target_matrix, source_to_transformed_matrix) - transformed_known_points: list[list[float]] = _calculate_transformed_points( - original_points=source_known_points, - transformation=source_to_transformed_matrix) - transformed_ray_points: list[list[float]] = _calculate_transformed_points( - original_points=source_ray_points, - transformation=source_to_transformed_matrix) - - iteration_count += 1 - - transformed_points = transformed_known_points + transformed_ray_points - point_offsets = numpy.subtract(target_points, transformed_points).tolist() - sum_point_distances = 0.0 - sum_square_point_distances = 0.0 - for delta_point_offset in 
point_offsets: - delta_point_distance: float = numpy.linalg.norm(delta_point_offset) - sum_point_distances += delta_point_distance - sum_square_point_distances += numpy.square(delta_point_distance) - mean_point_distance = sum_point_distances / len(point_offsets) - rms_point_distance = numpy.sqrt(sum_square_point_distances / len(point_offsets)) - - # Check if termination criteria are met - # Note that transformed_to_target_matrix describes the change since last iteration, so we often operate on it - delta_translation = numpy.linalg.norm(transformed_to_target_matrix[0:3, 3]) - delta_rotation_radians = \ - numpy.linalg.norm(Rotation.from_matrix(transformed_to_target_matrix[0:3, 0:3]).as_rotvec()) - if delta_translation < parameters.termination_delta_translation and \ - delta_rotation_radians < parameters.termination_delta_rotation_radians: - break - if mean_point_distance < parameters.termination_mean_point_distance: - break - if rms_point_distance < parameters.termination_rms_point_distance: - break - if iteration_count >= parameters.termination_iteration_count: - break - - return IterativeClosestPointOutput( - source_to_target_matrix=source_to_transformed_matrix, - iteration_count=iteration_count, - mean_point_distance=mean_point_distance, - rms_point_distance=rms_point_distance) - - -def test(): - # Transformation is approximately - source_known_points = [ - [2.0, 0.0, 2.0], - [2.0, 2.0, 2.0], - [2.0, 2.0, 0.0], - [2.0, 0.0, 0.0]] - source_ray_points = [ - [0.0, 2.0, 2.0], - [0.0, 0.0, 2.0], - [0.0, 0.0, 0.0], - [0.0, 2.0, 0.0]] - target_known_points = [ - [1.0, 1.0, -2.0], - [-1.0, 1.0, -2.0], - [-1.0, -1.0, -2.0], - [1.0, -1.0, -2.0]] - origin = [0.0, 0.0, 1.0] - sqrt3 = numpy.sqrt(3.0) - target_rays = [ - Ray(origin, [-sqrt3, sqrt3, -sqrt3]), - Ray(origin, [sqrt3, sqrt3, -sqrt3]), - Ray(origin, [sqrt3, -sqrt3, -sqrt3]), - Ray(origin, [-sqrt3, -sqrt3, -sqrt3])] - begin_datetime = datetime.datetime.utcnow() - icp_parameters = IterativeClosestPointParameters( - termination_iteration_count=100, - termination_delta_translation=0.001, - termination_delta_rotation_radians=0.001, - termination_mean_point_distance=0.0001, - termination_rms_point_distance=0.0001) - icp_output = iterative_closest_point_for_points_and_rays( - source_known_points=source_known_points, - target_known_points=target_known_points, - source_ray_points=source_ray_points, - target_rays=target_rays, - parameters=icp_parameters) - source_to_target_matrix = icp_output.source_to_target_matrix - end_datetime = datetime.datetime.utcnow() - duration = (end_datetime - begin_datetime) - duration_seconds = duration.seconds + (duration.microseconds / 1000000.0) - message = str() - for source_point in source_known_points: - source_4d = [source_point[0], source_point[1], source_point[2], 1] - target_4d = list(numpy.matmul(source_to_target_matrix, source_4d)) - target_point = [target_4d[0], target_4d[1], target_4d[2]] - message = message + str(target_point) + "\n" - message += "Algorithm took " + "{:.6f}".format(duration_seconds) + " seconds " + \ - "and took " + str(icp_output.iteration_count) + " iterations." 
- print(message) - - -if __name__ == "__main__": - test() diff --git a/src/pose_solver/util/line_intersection.py b/src/pose_solver/util/line_intersection.py deleted file mode 100644 index d4e82e4..0000000 --- a/src/pose_solver/util/line_intersection.py +++ /dev/null @@ -1,111 +0,0 @@ -import numpy -from src.pose_solver.structures import Ray - - -EPSILON: float = 0.0001 - - -class RayIntersection2: - parallel: bool # special case, mark it as such - closest_point_1: numpy.ndarray - closest_point_2: numpy.ndarray - - def __init__( - self, - parallel: bool, - closest_point_1: numpy.ndarray, - closest_point_2: numpy.ndarray - ): - self.parallel = parallel - self.closest_point_1 = closest_point_1 - self.closest_point_2 = closest_point_2 - - def centroid(self) -> numpy.ndarray: - return (self.closest_point_1 + self.closest_point_2) / 2 - - def distance(self) -> float: - return numpy.linalg.norm(self.closest_point_2 - self.closest_point_1) - - -class RayIntersectionN: - centroids: numpy.ndarray - - # How many rays were used. - # Note that centroids might not use all possible intersections (e.g. parallel rays) - ray_count: int - - def __init__( - self, - centroids: numpy.ndarray, - ray_count: int - ): - self.centroids = centroids - self.ray_count = ray_count - - def centroid(self) -> numpy.ndarray: - sum_centroids = numpy.asarray([0, 0, 0], dtype="float32") - for centroid in self.centroids: - sum_centroids += centroid - return sum_centroids / self.centroids.shape[0] - - def intersection_count(self) -> int: - return int((self.ray_count * (self.ray_count - 1))/2) - - -def closest_intersection_between_two_lines( - ray_1: Ray, - ray_2: Ray -) -> RayIntersection2: # Returns data on intersection - ray_1_direction_normalized = ray_1.direction / numpy.linalg.norm(ray_1.direction) - ray_2_direction_normalized = ray_2.direction / numpy.linalg.norm(ray_2.direction) - - # ray 3 will be perpendicular to both rays 1 and 2, - # and will intersect with both rays at the nearest point(s) - - ray_3_direction = numpy.cross(ray_2_direction_normalized, ray_1_direction_normalized) - ray_3_direction_norm = numpy.linalg.norm(ray_3_direction) - if ray_3_direction_norm < EPSILON: - return RayIntersection2( - parallel=True, - closest_point_1=ray_1.source_point, - closest_point_2=ray_2.source_point) - - # system of equations Ax = b - b = numpy.subtract(ray_2.source_point, ray_1.source_point) - a = numpy.asarray( - [ray_1_direction_normalized, -ray_2_direction_normalized, ray_3_direction], dtype="float32").transpose() - x = numpy.linalg.solve(a, b) - - param_ray_1 = float(x[0]) - intersection_point_1 = ray_1.source_point + param_ray_1 * ray_1_direction_normalized - - param_ray_2 = float(x[1]) - intersection_point_2 = ray_2.source_point + param_ray_2 * ray_2_direction_normalized - - return RayIntersection2( - parallel=False, - closest_point_1=intersection_point_1, - closest_point_2=intersection_point_2) - - -def closest_intersection_between_n_lines( - rays: list[Ray], - maximum_distance: float -) -> RayIntersectionN: - ray_count = len(rays) - intersections: list[RayIntersection2] = list() - for ray_1_index in range(0, ray_count): - for ray_2_index in range(ray_1_index + 1, ray_count): - intersections.append(closest_intersection_between_two_lines( - ray_1=rays[ray_1_index], - ray_2=rays[ray_2_index])) - centroids: list[numpy.ndarray] = list() - for intersection in intersections: - if intersection.parallel: - continue - if intersection.distance() > maximum_distance: - continue - centroids.append(intersection.centroid()) - return 
RayIntersectionN( - centroids=numpy.asarray(centroids, dtype="float32"), - ray_count=ray_count) diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 599c369..5eb4206 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -64,7 +64,7 @@ def test(self): # All cameras have the same imaging parameters. # To simplify our lives and ensure a reasonable result, - # we'll calibrate all of the cameras with the same set of input images. + # we'll calibrate all cameras with the same set of input images. # We'll use all images from the A# and B# sets of frames. calibration_result: IntrinsicCalibration | None = None with TemporaryDirectory() as temppath: @@ -82,9 +82,6 @@ def test(self): image_resolution=IMAGE_RESOLUTION, marker_parameters=MARKER_DETECTION_PARAMETERS) - print(calibration_result.model_dump()) - return - marker: ArucoOpenCVMarker = ArucoOpenCVMarker( configuration={"method": "aruco_opencv"}, status_message_source=status_message_source) @@ -103,3 +100,17 @@ def test(self): message = f"{detection_count} detections." status_message_source.enqueue_status_message(severity="info", message=message) print(message) + + # Constraint: Reference board must be visible to all cameras for first frame_id (frame_0) + # - Estimate camera position relative to frame_0 + # MathUtils.estimate_matrix_transform_to_detector() + # - Convert points to rays for all (camera_id, frame_id) using frame_0 as basis + # MathUtils.convert_detector_corners_to_vectors() + # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. + # MathUtils.closest_intersection_between_n_lines() + # - Refine camera positions based on working_points via PnP + # MathUtils.estimate_matrix_transform_to_detector() + # Iterate max times or until convergence: + # - Convert points to rays for all (camera_id, frame_id), using working_points as basis + # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. 
+ # - Refine camera positions based on working_points via PnP diff --git a/test/test_register_corresponding_points.py b/test/test_math_utils.py similarity index 53% rename from test/test_register_corresponding_points.py rename to test/test_math_utils.py index 7e5e085..6469826 100644 --- a/test/test_register_corresponding_points.py +++ b/test/test_math_utils.py @@ -1,11 +1,18 @@ -from src.common.util import register_corresponding_points +from src.common.util import MathUtils +from src.common.structures import \ + IterativeClosestPointParameters, \ + Ray +import datetime import numpy from scipy.spatial.transform import Rotation -import unittest +from typing import Final +from unittest import TestCase -# noinspection DuplicatedCode -class TestRegisterCorrespondingPoints(unittest.TestCase): +EPSILON: Final[float] = 0.0001 + + +class TestMathUtils(TestCase): def assertRotationCloseToIdentity( self, @@ -22,13 +29,76 @@ def assertRotationCloseToIdentity( self.assertAlmostEqual(float(matrix[2, 1]), 0.0, delta=tolerance) self.assertAlmostEqual(float(matrix[2, 2]), 1.0, delta=tolerance) - def test_identity_3_points(self): + def test_convex_quadrilateral_area(self): + points = [ + [1.0, 3.0], + [2.0, 5.0], + [5.0, 3.0], + [3.0, 2.0]] + area = 6.0 + self.assertAlmostEqual(abs(MathUtils.convex_quadrilateral_area(points)), area, delta=EPSILON) + self.assertAlmostEqual(abs(MathUtils.convex_quadrilateral_area([points[3]] + points[0:3])), area, delta=EPSILON) + + def test_iterative_closest_point(self) -> None: + # Transformation is approximately + source_known_points = [ + [2.0, 0.0, 2.0], + [2.0, 2.0, 2.0], + [2.0, 2.0, 0.0], + [2.0, 0.0, 0.0]] + source_ray_points = [ + [0.0, 2.0, 2.0], + [0.0, 0.0, 2.0], + [0.0, 0.0, 0.0], + [0.0, 2.0, 0.0]] + target_known_points = [ + [1.0, 1.0, -2.0], + [-1.0, 1.0, -2.0], + [-1.0, -1.0, -2.0], + [1.0, -1.0, -2.0]] + origin = [0.0, 0.0, 1.0] + sqrt3 = numpy.sqrt(3.0) + target_rays = [ + Ray(source_point=origin, direction=[-sqrt3, sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[sqrt3, sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[sqrt3, -sqrt3, -sqrt3]), + Ray(source_point=origin, direction=[-sqrt3, -sqrt3, -sqrt3])] + begin_datetime = datetime.datetime.utcnow() + icp_parameters = IterativeClosestPointParameters( + termination_iteration_count=100, + termination_delta_translation=0.001, + termination_delta_rotation_radians=0.001, + termination_mean_point_distance=0.0001, + termination_rms_point_distance=0.0001) + icp_output = MathUtils.iterative_closest_point_for_points_and_rays( + source_known_points=source_known_points, + target_known_points=target_known_points, + source_ray_points=source_ray_points, + target_rays=target_rays, + parameters=icp_parameters) + source_to_target_matrix = icp_output.source_to_target_matrix.as_numpy_array() + end_datetime = datetime.datetime.utcnow() + duration = (end_datetime - begin_datetime) + duration_seconds = duration.seconds + (duration.microseconds / 1000000.0) + message = str() + for source_point in source_known_points: + source_4d = [source_point[0], source_point[1], source_point[2], 1] + target_4d = list(numpy.matmul(source_to_target_matrix, source_4d)) + target_point = [target_4d[0], target_4d[1], target_4d[2]] + message = message + str(target_point) + "\n" + message += "Algorithm took " + "{:.6f}".format(duration_seconds) + " seconds " + \ + "and took " + str(icp_output.iteration_count) + " iterations." 
+ print(message) + + # TODO: Comparisons, self.assertXXXXX() + + def test_register_corresponding_points_identity_3_points(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] point_set_to = point_set_from - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], 0.0) self.assertAlmostEqual(matrix[1, 3], 0.0) @@ -38,14 +108,14 @@ def test_identity_3_points(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_identity_4_points(self): + def test_register_corresponding_points_identity_4_points(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]] point_set_to = point_set_from - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], 0.0) self.assertAlmostEqual(matrix[1, 3], 0.0) @@ -55,14 +125,14 @@ def test_identity_4_points(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_translation(self): + def test_register_corresponding_points_translation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] translation = [10, 0, 10] point_set_to = list(point_set_from + numpy.stack(translation)) - matrix = register_corresponding_points(point_set_from, point_set_to) + matrix = MathUtils.register_corresponding_points(point_set_from, point_set_to) self.assertRotationCloseToIdentity(matrix) self.assertAlmostEqual(matrix[0, 3], translation[0]) self.assertAlmostEqual(matrix[1, 3], translation[1]) @@ -72,14 +142,14 @@ def test_translation(self): self.assertEqual(matrix[3, 1], 0.0) self.assertEqual(matrix[3, 2], 0.0) - def test_rotation(self): + def test_register_corresponding_points_rotation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] ground_truth = Rotation.as_matrix(Rotation.from_euler(seq="x", angles=90, degrees=True)) point_set_to = list(numpy.matmul(ground_truth, numpy.asarray(point_set_from).T).T) - result = register_corresponding_points(point_set_from, point_set_to) + result = MathUtils.register_corresponding_points(point_set_from, point_set_to) for i in range(0, 3): for j in range(0, 3): self.assertAlmostEqual(result[i][j], ground_truth[i][j]) @@ -91,7 +161,7 @@ def test_rotation(self): self.assertEqual(result[3, 1], 0.0) self.assertEqual(result[3, 2], 0.0) - def test_rotation_and_translation(self): + def test_register_corresponding_points_rotation_and_translation(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], @@ -100,7 +170,7 @@ def test_rotation_and_translation(self): point_set_to = list(numpy.matmul(ground_truth, numpy.asarray(point_set_from).T).T) translation = [10, -20, 30] point_set_to = list(point_set_to + numpy.stack(translation)) - result = register_corresponding_points(point_set_from, point_set_to) + result = MathUtils.register_corresponding_points(point_set_from, point_set_to) for i in range(0, 3): for j in range(0, 3): self.assertAlmostEqual(result[i][j], ground_truth[i][j]) @@ -112,15 +182,15 @@ def test_rotation_and_translation(self): self.assertEqual(result[3, 1], 0.0) self.assertEqual(result[3, 2], 0.0) - def test_too_few_points(self): + def test_register_corresponding_points_too_few_points(self): point_set_from = [[0.0, 0.0, 0.0]] 
point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to) + MathUtils.register_corresponding_points(point_set_from, point_set_to) except ValueError: pass - def test_inequal_point_set_lengths(self): + def test_register_corresponding_points_inequal_point_set_lengths(self): point_set_from = [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], @@ -131,28 +201,29 @@ def test_inequal_point_set_lengths(self): [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] try: - register_corresponding_points(point_set_from, point_set_to) + MathUtils.register_corresponding_points(point_set_from, point_set_to) except ValueError: pass - def test_collinear(self): + def test_register_corresponding_points_collinear(self): point_set_from = [ [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]] point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) + MathUtils.register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) except ValueError: pass - def test_singular(self): + def test_register_corresponding_points_singular(self): point_set_from = [ [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] point_set_to = point_set_from try: - register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) + MathUtils.register_corresponding_points(point_set_from, point_set_to, collinearity_do_check=True) except ValueError: pass + diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index 0188f22..c8ddb1d 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -6,8 +6,7 @@ MarkerCornerImagePoint, \ MarkerSnapshot, \ Matrix4x4, \ - Pose -from src.common.structures import \ + Pose, \ TargetMarker import datetime from typing import Final @@ -101,8 +100,10 @@ def assertRotationCloseToIdentity( def test_single_camera_viewing_target_marker(self): # Note that single-marker tests are particularly susceptible to reference pose ambiguity - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() + # TODO: The following line shall be replaced upon implementation of an appropriate alternative + pose_solver._parameters.minimum_detector_count = 1 pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, intrinsic_parameters=DETECTOR_RED_INTRINSICS) @@ -164,7 +165,7 @@ def test_single_camera_viewing_target_marker(self): def test_four_cameras_viewing_target_marker(self): # Note that single-marker tests are particularly susceptible to reference pose ambiguity - now_utc = datetime.datetime.utcnow() + now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, From b5410566b98f5a46684e7a3eea1aa64787febddc Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 9 Jul 2025 16:16:24 -0400 Subject: [PATCH 04/33] WIP: Further consolidation of classes under `common` --- src/board_builder/structures/pose_location.py | 11 +- .../test/accuracy/accuracy_test.py | 16 +- .../test/repeatability/repeatability_test.py | 3 +- .../utils/board_builder_pose_solver.py | 2 +- src/common/__init__.py | 19 ++- src/common/api.py | 119 +++++++++++++++ src/common/api/__init__.py | 12 -- .../api/dequeue_status_messages_request.py | 14 -- .../api/dequeue_status_messages_response.py | 17 --- src/common/api/empty_response.py | 14 -- src/common/api/error_response.py | 16 -- src/common/api/mct_request.py | 7 - 
src/common/api/mct_request_series.py | 6 - src/common/api/mct_response.py | 7 - src/common/api/mct_response_series.py | 7 - src/common/api/time_sync_start_request.py | 14 -- src/common/api/time_sync_stop_request.py | 14 -- src/common/api/timestamp_get_request.py | 16 -- src/common/api/timestamp_get_response.py | 17 --- .../client_identifier_from_connection.py | 7 - .../mct_parsing_error.py => exceptions.py} | 4 +- src/common/exceptions/__init__.py | 2 - src/common/exceptions/mct_error.py | 3 - src/common/get_kwarg.py | 29 ---- src/common/image_coding.py | 84 ----------- src/common/image_utils.py | 46 ------ src/common/mct_component.py | 20 +-- src/common/standard_resolutions.py | 25 --- ...s_message_source.py => status_messages.py} | 38 ++++- src/common/structures/__init__.py | 76 +++++----- ...haruco_board_specification.py => aruco.py} | 55 ++++++- .../structures/aruco_board_specification.py | 28 ---- src/common/structures/capture_format.py | 4 - src/common/structures/component_role_label.py | 5 - src/common/structures/corner_refinement.py | 20 --- src/common/structures/detector.py | 51 +++++++ src/common/structures/detector_frame.py | 14 -- .../{intrinsic_parameters.py => image.py} | 64 +++++++- src/common/structures/image_resolution.py | 42 ------ .../structures/intrinsic_calibration.py | 24 --- src/common/structures/linear_algebra.py | 10 ++ .../structures/marker_corner_image_point.py | 6 - src/common/structures/marker_corners.py | 20 --- src/common/structures/marker_definition.py | 28 ---- src/common/structures/marker_snapshot.py | 7 - src/common/structures/mct_component.py | 11 ++ src/common/structures/mct_parsable.py | 56 ------- .../structures/{target.py => pose_solver.py} | 31 ++++ src/common/structures/pose_solver_frame.py | 12 -- src/common/structures/pose_solver_status.py | 20 --- ...y_value_structures.py => serialization.py} | 57 ++++++- src/common/structures/status_message.py | 25 --- src/common/structures/vec3.py | 7 - src/common/util/__init__.py | 3 + src/common/util/image_utils.py | 142 ++++++++++++++++++ src/common/util/network_utils.py | 17 +++ src/common/util/python_utils.py | 42 ++++++ src/controller/mct_controller.py | 5 +- src/controller/structures/connection.py | 7 +- .../structures/mct_component_address.py | 2 +- .../structures/pose_solver_connection.py | 2 +- src/detector/__init__.py | 2 +- src/detector/detector.py | 32 ++-- src/detector/detector_app.py | 6 +- .../camera_opencv_capture_device.py | 5 +- src/detector/interfaces/abstract_camera.py | 7 +- src/detector/interfaces/abstract_marker.py | 5 +- ...{calibrator.py => intrinsic_calibrator.py} | 19 ++- src/detector/structures/__init__.py | 29 ++-- .../structures/calibration_configuration.py | 5 - .../structures/calibration_image_metadata.py | 10 -- .../structures/calibration_image_state.py | 8 - src/detector/structures/calibration_map.py | 51 ++++++- .../structures/calibration_map_entry.py | 8 - .../structures/calibration_map_value.py | 8 - .../structures/calibration_result_metadata.py | 14 -- .../structures/calibration_result_state.py | 14 -- .../structures/camera_configuration.py | 7 - .../structures/detector_configuration.py | 17 ++- .../structures/marker_configuration.py | 5 - src/detector/structures/marker_status.py | 8 - .../{camera_status.py => status.py} | 6 + src/gui/panels/board_builder_panel.py | 79 +++++----- src/gui/panels/calibrator_panel.py | 5 +- src/gui/panels/controller_panel.py | 3 +- src/gui/panels/detector_panel.py | 12 +- src/gui/panels/specialized/log_panel.py | 2 +- 
src/pose_solver/pose_solver.py | 2 +- src/pose_solver/pose_solver_api.py | 16 +- src/slicer_connection.py | 4 +- ...generate_target_definition_from_charuco.py | 6 +- src/util/measure_detector_to_reference.py | 2 +- test/test_extrinsic_calibration.py | 8 +- test/test_math_utils.py | 2 +- 94 files changed, 907 insertions(+), 982 deletions(-) create mode 100644 src/common/api.py delete mode 100644 src/common/api/__init__.py delete mode 100644 src/common/api/dequeue_status_messages_request.py delete mode 100644 src/common/api/dequeue_status_messages_response.py delete mode 100644 src/common/api/empty_response.py delete mode 100644 src/common/api/error_response.py delete mode 100644 src/common/api/mct_request.py delete mode 100644 src/common/api/mct_request_series.py delete mode 100644 src/common/api/mct_response.py delete mode 100644 src/common/api/mct_response_series.py delete mode 100644 src/common/api/time_sync_start_request.py delete mode 100644 src/common/api/time_sync_stop_request.py delete mode 100644 src/common/api/timestamp_get_request.py delete mode 100644 src/common/api/timestamp_get_response.py delete mode 100644 src/common/client_identifier_from_connection.py rename src/common/{exceptions/mct_parsing_error.py => exceptions.py} (67%) delete mode 100644 src/common/exceptions/__init__.py delete mode 100644 src/common/exceptions/mct_error.py delete mode 100644 src/common/get_kwarg.py delete mode 100644 src/common/image_coding.py delete mode 100644 src/common/image_utils.py delete mode 100644 src/common/standard_resolutions.py rename src/common/{status_message_source.py => status_messages.py} (76%) rename src/common/structures/{charuco_board_specification.py => aruco.py} (62%) delete mode 100644 src/common/structures/aruco_board_specification.py delete mode 100644 src/common/structures/capture_format.py delete mode 100644 src/common/structures/component_role_label.py delete mode 100644 src/common/structures/corner_refinement.py create mode 100644 src/common/structures/detector.py delete mode 100644 src/common/structures/detector_frame.py rename src/common/structures/{intrinsic_parameters.py => image.py} (60%) delete mode 100644 src/common/structures/image_resolution.py delete mode 100644 src/common/structures/intrinsic_calibration.py delete mode 100644 src/common/structures/marker_corner_image_point.py delete mode 100644 src/common/structures/marker_corners.py delete mode 100644 src/common/structures/marker_definition.py delete mode 100644 src/common/structures/marker_snapshot.py create mode 100644 src/common/structures/mct_component.py delete mode 100644 src/common/structures/mct_parsable.py rename src/common/structures/{target.py => pose_solver.py} (80%) delete mode 100644 src/common/structures/pose_solver_frame.py delete mode 100644 src/common/structures/pose_solver_status.py rename src/common/structures/{key_value_structures.py => serialization.py} (62%) delete mode 100644 src/common/structures/status_message.py delete mode 100644 src/common/structures/vec3.py create mode 100644 src/common/util/image_utils.py create mode 100644 src/common/util/network_utils.py create mode 100644 src/common/util/python_utils.py rename src/detector/{calibrator.py => intrinsic_calibrator.py} (98%) delete mode 100644 src/detector/structures/calibration_configuration.py delete mode 100644 src/detector/structures/calibration_image_metadata.py delete mode 100644 src/detector/structures/calibration_image_state.py delete mode 100644 src/detector/structures/calibration_map_entry.py delete mode 100644 
src/detector/structures/calibration_map_value.py delete mode 100644 src/detector/structures/calibration_result_metadata.py delete mode 100644 src/detector/structures/calibration_result_state.py delete mode 100644 src/detector/structures/camera_configuration.py delete mode 100644 src/detector/structures/marker_configuration.py delete mode 100644 src/detector/structures/marker_status.py rename src/detector/structures/{camera_status.py => status.py} (65%) diff --git a/src/board_builder/structures/pose_location.py b/src/board_builder/structures/pose_location.py index c0af262..fb96037 100644 --- a/src/board_builder/structures/pose_location.py +++ b/src/board_builder/structures/pose_location.py @@ -1,18 +1,15 @@ +from src.common.util import MathUtils +from src.common.structures import Matrix4x4, Pose import datetime - import numpy as np -from numpy._typing import NDArray -from typing import Any from scipy.spatial.transform import Rotation as R -from src.common.util.average_quaternion import average_quaternion -from src.common.structures import Matrix4x4, Pose class PoseLocation: _id: str _timestamp: str - _TMatrix: NDArray[Any] + _TMatrix: np.ndarray _RMAT_list: list _TVEC_list: list @@ -36,7 +33,7 @@ def add_matrix(self, transformation_matrix: Matrix4x4, timestamp: str): quaternions = [R.from_matrix(rot).as_quat(canonical=True) for rot in self._RMAT_list] quaternions = [[float(quaternion[i]) for i in range(0, 4)] for quaternion in quaternions] - avg_quat = average_quaternion(quaternions) + avg_quat = MathUtils.average_quaternion(quaternions) avg_rotation = R.from_quat(avg_quat).as_matrix() self._TMatrix[:3, :3] = avg_rotation diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index f9c9a5a..be4fa60 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -1,21 +1,19 @@ -import os - -import numpy as np -import importlib -import json - -from structures import AccuracyTestParameters from src.board_builder.board_builder import BoardBuilder -from src.common.util import MathUtils +from src.common import MathUtils from src.common.structures import \ MarkerCornerImagePoint, \ MarkerSnapshot, \ TargetBoard, \ Marker -from utils import \ +from .structures import AccuracyTestParameters +from .utils import \ generate_virtual_snapshots, \ generate_data, \ graph_renderer +import json +import numpy as np +import importlib +import os class AccuracyTest: diff --git a/src/board_builder/test/repeatability/repeatability_test.py b/src/board_builder/test/repeatability/repeatability_test.py index 13534f3..7c674f4 100644 --- a/src/board_builder/test/repeatability/repeatability_test.py +++ b/src/board_builder/test/repeatability/repeatability_test.py @@ -1,7 +1,8 @@ +from src.common import MathUtils import json import os import numpy as np -from src.common.util import MathUtils + def transform_point(point, matrix): """Applies a 4x4 transformation matrix to a 3D point.""" diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index 48325ea..5d3ef53 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -2,6 +2,7 @@ MarkerRaySet, \ PoseData, \ PoseLocation +from src.common import MathUtils from src.common.structures import \ CharucoBoardSpecification, \ IntrinsicParameters, \ @@ -12,7 +13,6 @@ Ray, \ TargetBase, \ TargetMarker -from src.common.util import 
MathUtils from src.pose_solver.structures import PoseSolverParameters import cv2 import cv2.aruco diff --git a/src/common/__init__.py b/src/common/__init__.py index c660b37..565d53e 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -11,10 +11,17 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from .client_identifier_from_connection import client_identifier_from_connection -from .get_kwarg import get_kwarg -from .image_coding import ImageCoding -from .image_utils import ImageUtils +from .exceptions import \ + MCTError, \ + MCTParsingError +from .util import \ + ImageUtils, \ + IOUtils, \ + MathUtils, \ + NetworkUtils, \ + PythonUtils from .mct_component import MCTComponent -from .standard_resolutions import StandardResolutions -from .status_message_source import StatusMessageSource +from .status_messages import \ + SeverityLabel, \ + StatusMessage, \ + StatusMessageSource diff --git a/src/common/api.py b/src/common/api.py new file mode 100644 index 0000000..a23c2c3 --- /dev/null +++ b/src/common/api.py @@ -0,0 +1,119 @@ +from .status_messages import StatusMessage +from .structures import MCTParsable +import abc +from pydantic import BaseModel, Field, SerializeAsAny +from typing import Final, Literal + + +class MCTRequest(BaseModel, MCTParsable, abc.ABC): + parsable_type: str + + +class MCTRequestSeries(BaseModel): + series: list[SerializeAsAny[MCTRequest]] = Field() + + +class MCTResponse(BaseModel, MCTParsable, abc.ABC): + parsable_type: str + + +class MCTResponseSeries(BaseModel): + series: list[SerializeAsAny[MCTResponse]] = Field(default=list()) + responder: str = Field(default=str()) + + +class EmptyResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "empty" + + @staticmethod + def parsable_type_identifier() -> str: + return EmptyResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class ErrorResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "error" + + @staticmethod + def parsable_type_identifier() -> str: + return ErrorResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + message: str = Field() + + +class DequeueStatusMessagesRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" + + @staticmethod + def parsable_type_identifier() -> str: + return DequeueStatusMessagesRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class DequeueStatusMessagesResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" + + @staticmethod + def parsable_type_identifier() -> str: + return DequeueStatusMessagesResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + status_messages: list[StatusMessage] = Field() + + +class TimeSyncStartRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "time_sync_start_request" + + @staticmethod + def parsable_type_identifier() -> str: + return TimeSyncStartRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class TimeSyncStopRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "time_sync_stop_request" + + @staticmethod + def parsable_type_identifier() -> str: + return TimeSyncStopRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: 
Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class TimestampGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "timestamp_get_request" + + @staticmethod + def parsable_type_identifier() -> str: + return TimestampGetRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + requester_timestamp_utc_iso8601: str = Field() + + +class TimestampGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "timestamp_get_response" + + @staticmethod + def parsable_type_identifier() -> str: + return TimestampGetResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + requester_timestamp_utc_iso8601: str = Field() + responder_timestamp_utc_iso8601: str = Field() diff --git a/src/common/api/__init__.py b/src/common/api/__init__.py deleted file mode 100644 index 5805c52..0000000 --- a/src/common/api/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .dequeue_status_messages_request import DequeueStatusMessagesRequest -from .dequeue_status_messages_response import DequeueStatusMessagesResponse -from .empty_response import EmptyResponse -from .error_response import ErrorResponse -from .mct_request import MCTRequest -from .mct_request_series import MCTRequestSeries -from .mct_response import MCTResponse -from .mct_response_series import MCTResponseSeries -from .timestamp_get_request import TimestampGetRequest -from .timestamp_get_response import TimestampGetResponse -from .time_sync_start_request import TimeSyncStartRequest -from .time_sync_stop_request import TimeSyncStopRequest diff --git a/src/common/api/dequeue_status_messages_request.py b/src/common/api/dequeue_status_messages_request.py deleted file mode 100644 index d3eb040..0000000 --- a/src/common/api/dequeue_status_messages_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class DequeueStatusMessagesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - - @staticmethod - def parsable_type_identifier() -> str: - return DequeueStatusMessagesRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/dequeue_status_messages_response.py b/src/common/api/dequeue_status_messages_response.py deleted file mode 100644 index d6403f9..0000000 --- a/src/common/api/dequeue_status_messages_response.py +++ /dev/null @@ -1,17 +0,0 @@ -from .mct_response import MCTResponse -from src.common.structures.status_message import StatusMessage -from pydantic import Field -from typing import Final, Literal - - -class DequeueStatusMessagesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - - @staticmethod - def parsable_type_identifier() -> str: - return DequeueStatusMessagesResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - status_messages: list[StatusMessage] = Field() diff --git a/src/common/api/empty_response.py b/src/common/api/empty_response.py deleted file mode 100644 index 242149d..0000000 --- a/src/common/api/empty_response.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import Field -from typing import Final, Literal - - -class EmptyResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "empty" - - 
@staticmethod - def parsable_type_identifier() -> str: - return EmptyResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/error_response.py b/src/common/api/error_response.py deleted file mode 100644 index bf910fd..0000000 --- a/src/common/api/error_response.py +++ /dev/null @@ -1,16 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import Field -from typing import Final, Literal - - -class ErrorResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "error" - - @staticmethod - def parsable_type_identifier() -> str: - return ErrorResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - message: str = Field() diff --git a/src/common/api/mct_request.py b/src/common/api/mct_request.py deleted file mode 100644 index 359ec9d..0000000 --- a/src/common/api/mct_request.py +++ /dev/null @@ -1,7 +0,0 @@ -from ..structures.mct_parsable import MCTParsable -import abc -from pydantic import BaseModel - - -class MCTRequest(BaseModel, MCTParsable, abc.ABC): - parsable_type: str diff --git a/src/common/api/mct_request_series.py b/src/common/api/mct_request_series.py deleted file mode 100644 index 877054f..0000000 --- a/src/common/api/mct_request_series.py +++ /dev/null @@ -1,6 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import BaseModel, Field, SerializeAsAny - - -class MCTRequestSeries(BaseModel): - series: list[SerializeAsAny[MCTRequest]] = Field() diff --git a/src/common/api/mct_response.py b/src/common/api/mct_response.py deleted file mode 100644 index 9d8ba01..0000000 --- a/src/common/api/mct_response.py +++ /dev/null @@ -1,7 +0,0 @@ -from ..structures.mct_parsable import MCTParsable -import abc -from pydantic import BaseModel - - -class MCTResponse(BaseModel, MCTParsable, abc.ABC): - parsable_type: str diff --git a/src/common/api/mct_response_series.py b/src/common/api/mct_response_series.py deleted file mode 100644 index fd70fc0..0000000 --- a/src/common/api/mct_response_series.py +++ /dev/null @@ -1,7 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import BaseModel, Field, SerializeAsAny - - -class MCTResponseSeries(BaseModel): - series: list[SerializeAsAny[MCTResponse]] = Field(default=list()) - responder: str = Field(default=str()) diff --git a/src/common/api/time_sync_start_request.py b/src/common/api/time_sync_start_request.py deleted file mode 100644 index db45559..0000000 --- a/src/common/api/time_sync_start_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimeSyncStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_start_request" - - @staticmethod - def parsable_type_identifier() -> str: - return TimeSyncStartRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/time_sync_stop_request.py b/src/common/api/time_sync_stop_request.py deleted file mode 100644 index 4942f89..0000000 --- a/src/common/api/time_sync_stop_request.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimeSyncStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_stop_request" - - @staticmethod - def parsable_type_identifier() -> str: - return 
TimeSyncStopRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/common/api/timestamp_get_request.py b/src/common/api/timestamp_get_request.py deleted file mode 100644 index a3f9708..0000000 --- a/src/common/api/timestamp_get_request.py +++ /dev/null @@ -1,16 +0,0 @@ -from .mct_request import MCTRequest -from pydantic import Field -from typing import Final, Literal - - -class TimestampGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_request" - - @staticmethod - def parsable_type_identifier() -> str: - return TimestampGetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - requester_timestamp_utc_iso8601: str = Field() diff --git a/src/common/api/timestamp_get_response.py b/src/common/api/timestamp_get_response.py deleted file mode 100644 index 2699e81..0000000 --- a/src/common/api/timestamp_get_response.py +++ /dev/null @@ -1,17 +0,0 @@ -from .mct_response import MCTResponse -from pydantic import Field -from typing import Final, Literal - - -class TimestampGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_response" - - @staticmethod - def parsable_type_identifier() -> str: - return TimestampGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - requester_timestamp_utc_iso8601: str = Field() - responder_timestamp_utc_iso8601: str = Field() diff --git a/src/common/client_identifier_from_connection.py b/src/common/client_identifier_from_connection.py deleted file mode 100644 index a6b05e6..0000000 --- a/src/common/client_identifier_from_connection.py +++ /dev/null @@ -1,7 +0,0 @@ -from fastapi import Request, WebSocket - - -def client_identifier_from_connection( - connection: Request | WebSocket -) -> str: - return f"{connection.client.host}:{connection.client.port}" diff --git a/src/common/exceptions/mct_parsing_error.py b/src/common/exceptions.py similarity index 67% rename from src/common/exceptions/mct_parsing_error.py rename to src/common/exceptions.py index a550f2c..dd46d7a 100644 --- a/src/common/exceptions/mct_parsing_error.py +++ b/src/common/exceptions.py @@ -1,4 +1,6 @@ -from .mct_error import MCTError +class MCTError(Exception): + def __init__(self, *args): + super().__init__(*args) class MCTParsingError(MCTError): diff --git a/src/common/exceptions/__init__.py b/src/common/exceptions/__init__.py deleted file mode 100644 index f5f7ff8..0000000 --- a/src/common/exceptions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .mct_error import MCTError -from .mct_parsing_error import MCTParsingError diff --git a/src/common/exceptions/mct_error.py b/src/common/exceptions/mct_error.py deleted file mode 100644 index 97abf2f..0000000 --- a/src/common/exceptions/mct_error.py +++ /dev/null @@ -1,3 +0,0 @@ -class MCTError(Exception): - def __init__(self, *args): - super().__init__(*args) diff --git a/src/common/get_kwarg.py b/src/common/get_kwarg.py deleted file mode 100644 index 851e431..0000000 --- a/src/common/get_kwarg.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import TypeVar -T = TypeVar("T") - - -def get_kwarg( - kwargs: dict, - key: str, - arg_type: type[T], - required: bool = True -) -> T | None: - """ - :param kwargs: kwargs as a dict (without the "**") - :param key: key to search for - :param arg_type: expected type - :param required: If the keyword does not exist, then: - 
required == True -> Raise ValueError - required == False -> Return None - """ - - if key not in kwargs: - if required: - raise ValueError(f"Missing required key {key} in keyword arguments.") - return None - value: T = kwargs[key] - if not isinstance(value, arg_type): - raise ValueError( - f"Expected keyword argument {key} to be of type {arg_type.__name__}, " - f"but got {type(value).__name__}.") - return value diff --git a/src/common/image_coding.py b/src/common/image_coding.py deleted file mode 100644 index b9ed6d1..0000000 --- a/src/common/image_coding.py +++ /dev/null @@ -1,84 +0,0 @@ -from .structures import CaptureFormat -import base64 -import cv2 -import logging -import numpy -from typing import Literal - - -logger = logging.getLogger(__file__) - -ColorMode = Literal["color", "greyscale"] - - -class ImageCoding: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. - """ - - def __init__(self): - raise RuntimeError("ImageCoding is not meant to be instantiated.") - - @staticmethod - def base64_to_image( - input_base64: str, - color_mode: ColorMode = "color" - ) -> numpy.ndarray: - """ - Assumes 8 bits per component - """ - - image_bytes: bytes = base64.b64decode(s=input_base64) - - color_flag: int = 0 - if color_mode == "color": - color_flag |= cv2.IMREAD_COLOR - elif color_mode == "greyscale": - color_flag |= cv2.IMREAD_GRAYSCALE - else: - logger.warning(f"Unsupported color mode specified: {color_mode}") - - opencv_image: numpy.ndarray = cv2.imdecode( - buf=numpy.frombuffer( - buffer=image_bytes, - dtype=numpy.uint8), - flags=color_flag) - return opencv_image - - @staticmethod - def bytes_to_base64( - image_bytes: bytes - ) -> str: - return base64.b64encode(image_bytes).decode("ascii") - - @staticmethod - def image_to_base64( - image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", - ) -> str: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... - :return: base64 string representing the image - """ - encoded_image_rgb_bytes: bytes = ImageCoding.image_to_bytes( - image_data=image_data, - image_format=image_format) - encoded_image_rgb_base64: str = ImageCoding.bytes_to_base64(encoded_image_rgb_bytes) - return encoded_image_rgb_base64 - - @staticmethod - def image_to_bytes( - image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", - ) -> bytes: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... - :return: base64 string representing the image - """ - encoded_image_rgb_single_row: numpy.array - encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) - encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() - return encoded_image_rgb_bytes diff --git a/src/common/image_utils.py b/src/common/image_utils.py deleted file mode 100644 index 7cc4546..0000000 --- a/src/common/image_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -import cv2 -import numpy - - -class ImageUtils: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. 
- """ - - def __init__(self): - raise RuntimeError("ImageUtils is not meant to be instantiated.") - - @staticmethod - def black_image( - resolution_px: tuple[int, int], - ) -> numpy.ndarray: - return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) - - @staticmethod - def image_resize_to_fit( - opencv_image: numpy.ndarray, - available_size: tuple[int, int] # x, y - ) -> numpy.ndarray: - # note: opencv height represented by 1st dimension - source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) - image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( - source_resolution_px=source_resolution_px, - available_size_px=available_size) - return cv2.resize( - src=opencv_image, - dsize=(image_width_px, image_height_px)) - - @staticmethod - def scale_factor_for_available_space_px( - source_resolution_px: tuple[int, int], - available_size_px: tuple[int, int] - ) -> tuple[int, int]: - source_width_px: int = source_resolution_px[0] - source_height_px: int = source_resolution_px[1] - available_width_px: int = available_size_px[0] - available_height_px: int = available_size_px[1] - scale: float = min( - available_width_px / float(source_width_px), - available_height_px / float(source_height_px)) - return int(round(source_width_px * scale)), int(round(source_height_px * scale)) diff --git a/src/common/mct_component.py b/src/common/mct_component.py index 490a639..deef008 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -1,6 +1,4 @@ -from .get_kwarg import get_kwarg -from .status_message_source import StatusMessageSource -from src.common.api import \ +from .api import \ DequeueStatusMessagesRequest, \ DequeueStatusMessagesResponse, \ EmptyResponse, \ @@ -13,17 +11,21 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.common.exceptions import MCTParsingError -from src.common.structures import \ - MCTParsable, \ +from .exceptions import MCTParsingError +from .status_messages import \ SeverityLabel, \ - StatusMessage + StatusMessage, \ + StatusMessageSource +from .structures import MCTParsable +from .util import \ + PythonUtils import abc import datetime from fastapi import WebSocket, WebSocketDisconnect import logging from typing import Callable, Optional, TypeVar + logger = logging.getLogger(__name__) @@ -83,7 +85,7 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: """ :key client_identifier: str """ - client_identifier: str = get_kwarg( + client_identifier: str = PythonUtils.get_kwarg( kwargs=kwargs, key="client_identifier", arg_type=str) @@ -111,7 +113,7 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT TimeSyncStopRequest: self.time_sync_stop} def timestamp_get(self, **kwargs) -> TimestampGetResponse: - request: TimestampGetRequest = get_kwarg( + request: TimestampGetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=TimestampGetRequest) diff --git a/src/common/standard_resolutions.py b/src/common/standard_resolutions.py deleted file mode 100644 index f65081b..0000000 --- a/src/common/standard_resolutions.py +++ /dev/null @@ -1,25 +0,0 @@ -from .structures import ImageResolution -from typing import Final - - -class StandardResolutions: - RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) - RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) - RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) - RES_1024X768: 
Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) - RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) - RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) - RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=1024) - RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) - - @staticmethod - def as_list(): - return [ - StandardResolutions.RES_640X360, - StandardResolutions.RES_640X480, - StandardResolutions.RES_800X600, - StandardResolutions.RES_1024X768, - StandardResolutions.RES_1280X720, - StandardResolutions.RES_1280X800, - StandardResolutions.RES_1280X1024, - StandardResolutions.RES_1920X1080] diff --git a/src/common/status_message_source.py b/src/common/status_messages.py similarity index 76% rename from src/common/status_message_source.py rename to src/common/status_messages.py index dbb38ce..a358dc8 100644 --- a/src/common/status_message_source.py +++ b/src/common/status_messages.py @@ -1,13 +1,45 @@ -from src.common.structures import \ - SeverityLabel, \ - StatusMessage import datetime +from enum import StrEnum import logging +from pydantic import BaseModel, Field +from typing import Final + logger = logging.getLogger(__name__) +SEVERITY_LABEL_DEBUG: Final[str] = "debug" +SEVERITY_LABEL_INFO: Final[str] = "info" +SEVERITY_LABEL_WARNING: Final[str] = "warning" +SEVERITY_LABEL_ERROR: Final[str] = "error" +SEVERITY_LABEL_CRITICAL: Final[str] = "critical" +class SeverityLabel(StrEnum): + DEBUG: Final[str] = SEVERITY_LABEL_DEBUG + INFO: Final[str] = SEVERITY_LABEL_INFO + WARNING: Final[str] = SEVERITY_LABEL_WARNING + ERROR: Final[str] = SEVERITY_LABEL_ERROR + CRITICAL: Final[str] = SEVERITY_LABEL_CRITICAL + + +SEVERITY_LABEL_TO_INT: Final[dict[SeverityLabel, int]] = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL} + + +class StatusMessage(BaseModel): + source_label: str = Field() + severity: SeverityLabel = Field() + message: str + timestamp_utc_iso8601: str + + class StatusMessageSource: + """ + Class to facilitate the management of status messages sent between components + """ _source_label: str diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index cc51110..b2549b0 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -1,24 +1,42 @@ -from .capture_format import CaptureFormat -from .charuco_board_specification import CharucoBoardSpecification -from .component_role_label import \ - ComponentRoleLabel, \ - COMPONENT_ROLE_LABEL_DETECTOR, \ - COMPONENT_ROLE_LABEL_POSE_SOLVER -from .corner_refinement import \ +from .aruco import \ + CharucoBoardSpecification, \ CornerRefinementMethod, \ CORNER_REFINEMENT_METHOD_NONE, \ CORNER_REFINEMENT_METHOD_SUBPIX, \ CORNER_REFINEMENT_METHOD_CONTOUR,\ CORNER_REFINEMENT_METHOD_APRILTAG, \ CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT -from .detector_frame import DetectorFrame -from .image_resolution import ImageResolution -from .intrinsic_calibration import \ + CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ + MarkerDefinition +from .detector import \ + CaptureFormat, \ + DetectorFrame, \ + MarkerCorners, \ + MarkerCornerImagePoint, \ + MarkerSnapshot +from .image import \ + ImageResolution, \ IntrinsicCalibration, \ - IntrinsicCalibrationFrameResult -from .intrinsic_parameters import IntrinsicParameters -from .key_value_structures 
import \ + IntrinsicCalibrationFrameResult, \ + IntrinsicParameters +from .linear_algebra import \ + IterativeClosestPointParameters, \ + Matrix4x4, \ + Pose, \ + Ray, \ + Vec3 +from .mct_component import \ + ComponentRoleLabel, \ + COMPONENT_ROLE_LABEL_DETECTOR, \ + COMPONENT_ROLE_LABEL_POSE_SOLVER +from .pose_solver import \ + Marker, \ + PoseSolverFrame, \ + PoseSolverStatus, \ + TargetBase, \ + TargetBoard, \ + TargetMarker +from .serialization import \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ KeyValueSimpleBool, \ @@ -31,31 +49,5 @@ KeyValueMetaEnum, \ KeyValueMetaFloat, \ KeyValueMetaInt, \ - key_value_meta_to_simple -from .linear_algebra import \ - IterativeClosestPointParameters, \ - Matrix4x4, \ - Pose, \ - Ray -from .marker_corner_image_point import MarkerCornerImagePoint -from .marker_corners import MarkerCorners -from .marker_definition import MarkerDefinition -from .marker_snapshot import MarkerSnapshot -from .mct_parsable import MCTParsable -from .pose_solver_frame import PoseSolverFrame -from .pose_solver_status import PoseSolverStatus -from .status_message import \ - SeverityLabel, \ - SEVERITY_LABEL_DEBUG, \ - SEVERITY_LABEL_INFO, \ - SEVERITY_LABEL_WARNING, \ - SEVERITY_LABEL_ERROR, \ - SEVERITY_LABEL_CRITICAL, \ - SEVERITY_LABEL_TO_INT, \ - StatusMessage -from .target import \ - Marker, \ - TargetBase, \ - TargetBoard, \ - TargetMarker -from .vec3 import Vec3 + key_value_meta_to_simple, \ + MCTParsable diff --git a/src/common/structures/charuco_board_specification.py b/src/common/structures/aruco.py similarity index 62% rename from src/common/structures/charuco_board_specification.py rename to src/common/structures/aruco.py index 6c224a5..9059334 100644 --- a/src/common/structures/charuco_board_specification.py +++ b/src/common/structures/aruco.py @@ -1,6 +1,30 @@ -from pydantic import BaseModel, Field -from typing import Any, Tuple +import base64 +import cv2 import cv2.aruco +import numpy +from pydantic import BaseModel, Field +from typing import Any, Final, Literal, Tuple + + +CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] +CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' +CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' +CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' +CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' + + +CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { + "NONE": cv2.aruco.CORNER_REFINE_NONE, + "SUBPIX": cv2.aruco.CORNER_REFINE_SUBPIX, + "CONTOUR": cv2.aruco.CORNER_REFINE_CONTOUR, + "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} + + +CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, CornerRefinementMethod] = { + cv2.aruco.CORNER_REFINE_NONE: "NONE", + cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", + cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", + cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} class CharucoBoardSpecification(BaseModel): @@ -83,3 +107,30 @@ def get_marker_corner_points(self) -> list[list[float]]: def get_marker_ids(self) -> list[int]: num_markers = self.square_count_x * self.square_count_y // 2 return list(range(num_markers)) + + +class MarkerDefinition(BaseModel): + # TODO: This is unused at the time of writing 2025-07-09, deletion should be assessed + + label: str = Field() + representation_single_base64: str = Field() # representation from a single rotation only + + def representation_all_base64(self): + """ + OpenCV ArUco expects to receive all possible rotations of a marker. We generate these programmatically. 
+ """ + representation_single_bytes: bytes = base64.b64decode(self.representation_single_base64) + representation_single_list: list[bool] = list(representation_single_bytes) + representation_single_matrix: numpy.ndarray = numpy.asarray( + a=representation_single_list, + dtype=bool) + marker_side_length_bits: int = int(numpy.sqrt(len(representation_single_list))) + representation_single_matrix = numpy.reshape( + a=representation_single_matrix, + newshape=(marker_side_length_bits, marker_side_length_bits)) + representation_all_list: list[bool] = list(representation_single_matrix.flatten()) + for i in range(3): + representation_single_matrix = numpy.rot90(representation_single_matrix) + representation_all_list += list(representation_single_matrix.flatten()) + representation_all_bytes: bytes = bytes(representation_all_list) + return base64.b64encode(representation_all_bytes) diff --git a/src/common/structures/aruco_board_specification.py b/src/common/structures/aruco_board_specification.py deleted file mode 100644 index b4e5c49..0000000 --- a/src/common/structures/aruco_board_specification.py +++ /dev/null @@ -1,28 +0,0 @@ -from pydantic import BaseModel, validator -from .vec3 import Vec3 -import json, hjson - -class BoardMarker(BaseModel): - marker_id: int - points: list[Vec3] - - @validator - def check_points_length(cls, values): - points = values.get('points') - if points is not None and len(points) != 4: - raise ValueError("The list of points must have exactly four elements") - return values - - -class Board(BaseModel): - board_markers: list[BoardMarker] - -def read_file(input_filepath: str) -> Board: - with open(input_filepath, 'r') as file: - data = hjson.load(file) - return Board(**data) - - -def write_file(output_filepath: str, output_board: Board) -> None: - with open(output_filepath, 'w') as file: - json.dump(output_board.as_dict(), file, indent=4) diff --git a/src/common/structures/capture_format.py b/src/common/structures/capture_format.py deleted file mode 100644 index 3798735..0000000 --- a/src/common/structures/capture_format.py +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Literal - - -CaptureFormat = Literal[".png", ".jpg"] diff --git a/src/common/structures/component_role_label.py b/src/common/structures/component_role_label.py deleted file mode 100644 index edbcf17..0000000 --- a/src/common/structures/component_role_label.py +++ /dev/null @@ -1,5 +0,0 @@ -from typing import Final, Literal - -COMPONENT_ROLE_LABEL_DETECTOR: Final[str] = "detector" -COMPONENT_ROLE_LABEL_POSE_SOLVER: Final[str] = "pose_solver" -ComponentRoleLabel = Literal["detector", "pose_solver"] diff --git a/src/common/structures/corner_refinement.py b/src/common/structures/corner_refinement.py deleted file mode 100644 index 9e6a1bd..0000000 --- a/src/common/structures/corner_refinement.py +++ /dev/null @@ -1,20 +0,0 @@ -import cv2 -from typing import Final, Literal - -CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] -CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' -CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' -CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' -CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' - -CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { - "NONE": cv2.aruco.CORNER_REFINE_NONE, - "SUBPIX": cv2.aruco.CORNER_REFINE_SUBPIX, - "CONTOUR": cv2.aruco.CORNER_REFINE_CONTOUR, - "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} - -CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, 
CornerRefinementMethod] = { - cv2.aruco.CORNER_REFINE_NONE: "NONE", - cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", - cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", - cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} diff --git a/src/common/structures/detector.py b/src/common/structures/detector.py new file mode 100644 index 0000000..a46962a --- /dev/null +++ b/src/common/structures/detector.py @@ -0,0 +1,51 @@ +from .image import ImageResolution +import datetime +from enum import StrEnum +from pydantic import BaseModel, Field +from typing import Final + + +class CaptureFormat(StrEnum): + FORMAT_PNG: Final[str] = ".png" + FORMAT_JPG: Final[str] = ".jpg" + + +class MarkerCornerImagePoint(BaseModel): + # TODO: Some types of markers may not refer to "corners" per se, so it may be worth renaming this class + x_px: float = Field() + y_px: float = Field() + + +class MarkerSnapshot(BaseModel): + label: str = Field() + corner_image_points: list[MarkerCornerImagePoint] = Field() + + +class DetectorFrame(BaseModel): + detected_marker_snapshots: list[MarkerSnapshot] | None = Field() + rejected_marker_snapshots: list[MarkerSnapshot] | None = Field() + timestamp_utc_iso8601: str = Field() + image_resolution: ImageResolution = Field() + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class MarkerCorners: + # TODO: Remove this class in favour of DetectorFrame + detector_label: str + marker_id: int + points: list[list[float]] + timestamp: datetime.datetime + + def __init__( + self, + detector_label: str, + marker_id: int, + points: list[list[float]], + timestamp: datetime.datetime + ): + self.detector_label = detector_label + self.marker_id = marker_id + self.points = points + self.timestamp = timestamp diff --git a/src/common/structures/detector_frame.py b/src/common/structures/detector_frame.py deleted file mode 100644 index aef2b36..0000000 --- a/src/common/structures/detector_frame.py +++ /dev/null @@ -1,14 +0,0 @@ -from .image_resolution import ImageResolution -from .marker_snapshot import MarkerSnapshot -import datetime -from pydantic import BaseModel, Field - - -class DetectorFrame(BaseModel): - detected_marker_snapshots: list[MarkerSnapshot] | None = Field() - rejected_marker_snapshots: list[MarkerSnapshot] | None = Field() - timestamp_utc_iso8601: str = Field() - image_resolution: ImageResolution = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/intrinsic_parameters.py b/src/common/structures/image.py similarity index 60% rename from src/common/structures/intrinsic_parameters.py rename to src/common/structures/image.py index b510a7e..86df23f 100644 --- a/src/common/structures/intrinsic_parameters.py +++ b/src/common/structures/image.py @@ -1,5 +1,48 @@ -from pydantic import BaseModel, Field +from .serialization import KeyValueSimpleAny +from .linear_algebra import Vec3 import math +from pydantic import BaseModel, Field, SerializeAsAny + + +class ImageResolution(BaseModel): + x_px: int = Field() + y_px: int = Field() + + def __eq__(self, other) -> bool: + if type(self) is not type(other): + return False + return \ + self.x_px == other.x_px and \ + self.y_px == other.y_px + + def __hash__(self) -> int: + return hash(str(self)) + + def __lt__(self, other): + if not isinstance(other, ImageResolution): + raise ValueError() + if self.x_px < other.x_px: + return True + elif self.x_px > other.x_px: + return False + elif self.y_px < other.y_px: + return True + else: + 
return False + + def __str__(self): + return f"{self.x_px}x{self.y_px}" + + @staticmethod + def from_str(in_str: str) -> 'ImageResolution': + if 'x' not in in_str: + raise ValueError("in_str is expected to contain delimiter 'x'.") + parts: list[str] = in_str.split('x') + if len(parts) > 2: + raise ValueError("in_str is expected to contain exactly one 'x'.") + x_px = int(parts[0]) + y_px = int(parts[1]) + return ImageResolution(x_px=x_px, y_px=y_px) class IntrinsicParameters(BaseModel): @@ -68,3 +111,22 @@ def generate_zero_parameters( optical_center_y_px=optical_center_y_px, radial_distortion_coefficients=[0.0, 0.0, 0.0], tangential_distortion_coefficients=[0.0, 0.0]) + + +class IntrinsicCalibrationFrameResult(BaseModel): + image_identifier: str = Field() + translation: Vec3 = Field() + rotation: Vec3 = Field() + translation_stdev: Vec3 = Field() + rotation_stdev: Vec3 = Field() + reprojection_error: float = Field() + + +class IntrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + image_resolution: ImageResolution = Field() + reprojection_error: float = Field() + calibrated_values: IntrinsicParameters = Field() + calibrated_stdevs: list[float] = Field() + marker_parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() + frame_results: list[IntrinsicCalibrationFrameResult] = Field(default=list()) diff --git a/src/common/structures/image_resolution.py b/src/common/structures/image_resolution.py deleted file mode 100644 index 92c4a0a..0000000 --- a/src/common/structures/image_resolution.py +++ /dev/null @@ -1,42 +0,0 @@ -from pydantic import BaseModel, Field - - -class ImageResolution(BaseModel): - x_px: int = Field() - y_px: int = Field() - - def __eq__(self, other) -> bool: - if type(self) is not type(other): - return False - return \ - self.x_px == other.x_px and \ - self.y_px == other.y_px - - def __hash__(self) -> int: - return hash(str(self)) - - def __lt__(self, other): - if not isinstance(other, ImageResolution): - raise ValueError() - if self.x_px < other.x_px: - return True - elif self.x_px > other.x_px: - return False - elif self.y_px < other.y_px: - return True - else: - return False - - def __str__(self): - return f"{self.x_px}x{self.y_px}" - - @staticmethod - def from_str(in_str: str) -> 'ImageResolution': - if 'x' not in in_str: - raise ValueError("in_str is expected to contain delimiter 'x'.") - parts: list[str] = in_str.split('x') - if len(parts) > 2: - raise ValueError("in_str is expected to contain exactly one 'x'.") - x_px = int(parts[0]) - y_px = int(parts[1]) - return ImageResolution(x_px=x_px, y_px=y_px) diff --git a/src/common/structures/intrinsic_calibration.py b/src/common/structures/intrinsic_calibration.py deleted file mode 100644 index 5388aec..0000000 --- a/src/common/structures/intrinsic_calibration.py +++ /dev/null @@ -1,24 +0,0 @@ -from .image_resolution import ImageResolution -from .intrinsic_parameters import IntrinsicParameters -from .key_value_structures import KeyValueSimpleAny -from .vec3 import Vec3 -from pydantic import BaseModel, Field, SerializeAsAny - - -class IntrinsicCalibrationFrameResult(BaseModel): - image_identifier: str = Field() - translation: Vec3 = Field() - rotation: Vec3 = Field() - translation_stdev: Vec3 = Field() - rotation_stdev: Vec3 = Field() - reprojection_error: float = Field() - - -class IntrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - image_resolution: ImageResolution = Field() - reprojection_error: float = Field() - calibrated_values: IntrinsicParameters = Field() - calibrated_stdevs: 
list[float] = Field() - marker_parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() - frame_results: list[IntrinsicCalibrationFrameResult] = Field(default=list()) diff --git a/src/common/structures/linear_algebra.py b/src/common/structures/linear_algebra.py index 7925e6e..d9a1f7b 100644 --- a/src/common/structures/linear_algebra.py +++ b/src/common/structures/linear_algebra.py @@ -118,3 +118,13 @@ def __init__( raise ValueError("Direction cannot be zero.") self.source_point = source_point self.direction = direction + + +class Vec3(BaseModel): + """ + Simply a container for x, y, and z coordinates. + TODO: This has limited usage, it may be best to delete it + """ + x: float = Field() + y: float = Field() + z: float = Field() diff --git a/src/common/structures/marker_corner_image_point.py b/src/common/structures/marker_corner_image_point.py deleted file mode 100644 index d3a3be0..0000000 --- a/src/common/structures/marker_corner_image_point.py +++ /dev/null @@ -1,6 +0,0 @@ -from pydantic import BaseModel, Field - - -class MarkerCornerImagePoint(BaseModel): - x_px: float = Field() - y_px: float = Field() diff --git a/src/common/structures/marker_corners.py b/src/common/structures/marker_corners.py deleted file mode 100644 index a5d6a0c..0000000 --- a/src/common/structures/marker_corners.py +++ /dev/null @@ -1,20 +0,0 @@ -import datetime - -# TODO: Remove this in favour of DetectorFrame or another data structure -class MarkerCorners: - detector_label: str - marker_id: int - points: list[list[float]] - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp diff --git a/src/common/structures/marker_definition.py b/src/common/structures/marker_definition.py deleted file mode 100644 index 96f8f31..0000000 --- a/src/common/structures/marker_definition.py +++ /dev/null @@ -1,28 +0,0 @@ -import base64 -import numpy -from pydantic import BaseModel, Field - - -class MarkerDefinition(BaseModel): - label: str = Field() - representation_single_base64: str = Field() # representation from a single rotation only - - def representation_all_base64(self): - """ - OpenCV ArUco expects to receive all possible rotations of a marker. We generate these programmatically. 
- """ - representation_single_bytes: bytes = base64.b64decode(self.representation_single_base64) - representation_single_list: list[bool] = list(representation_single_bytes) - representation_single_matrix: numpy.ndarray = numpy.asarray( - a=representation_single_list, - dtype=bool) - marker_side_length_bits: int = int(numpy.sqrt(len(representation_single_list))) - representation_single_matrix = numpy.reshape( - a=representation_single_matrix, - newshape=(marker_side_length_bits, marker_side_length_bits)) - representation_all_list: list[bool] = list(representation_single_matrix.flatten()) - for i in range(3): - representation_single_matrix = numpy.rot90(representation_single_matrix) - representation_all_list += list(representation_single_matrix.flatten()) - representation_all_bytes: bytes = bytes(representation_all_list) - return base64.b64encode(representation_all_bytes) diff --git a/src/common/structures/marker_snapshot.py b/src/common/structures/marker_snapshot.py deleted file mode 100644 index 849297c..0000000 --- a/src/common/structures/marker_snapshot.py +++ /dev/null @@ -1,7 +0,0 @@ -from .marker_corner_image_point import MarkerCornerImagePoint -from pydantic import BaseModel, Field - - -class MarkerSnapshot(BaseModel): - label: str = Field() - corner_image_points: list[MarkerCornerImagePoint] = Field() diff --git a/src/common/structures/mct_component.py b/src/common/structures/mct_component.py new file mode 100644 index 0000000..e81a6fc --- /dev/null +++ b/src/common/structures/mct_component.py @@ -0,0 +1,11 @@ +from enum import StrEnum +from typing import Final + + +COMPONENT_ROLE_LABEL_DETECTOR: Final[str] = "detector" +COMPONENT_ROLE_LABEL_POSE_SOLVER: Final[str] = "pose_solver" +class ComponentRoleLabel(StrEnum): + DETECTOR: Final[str] = COMPONENT_ROLE_LABEL_DETECTOR + POSE_SOLVER: Final[str] = COMPONENT_ROLE_LABEL_POSE_SOLVER + + diff --git a/src/common/structures/mct_parsable.py b/src/common/structures/mct_parsable.py deleted file mode 100644 index d6400f1..0000000 --- a/src/common/structures/mct_parsable.py +++ /dev/null @@ -1,56 +0,0 @@ -from src.common.exceptions import MCTParsingError -import abc -from pydantic import ValidationError -from typing import TypeVar - - -ParsableDynamic = TypeVar('ParsableDynamic', bound='MCTParsable') - - -class MCTParsable(abc.ABC): - - @staticmethod - @abc.abstractmethod - def parsable_type_identifier() -> str: - pass - - @staticmethod - def parse_dynamic_series_list( - parsable_series_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> list[ParsableDynamic]: - if "series" not in parsable_series_dict or not isinstance(parsable_series_dict["series"], list): - message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." - raise MCTParsingError(message) - - output_series: list[ParsableDynamic] = list() - for parsable_dict in parsable_series_dict["series"]: - if not isinstance(parsable_dict, dict): - message: str = "series contained a non-dict element. Input is improperly formatted." - raise MCTParsingError(message) - output_series.append(MCTParsable.parse_dynamic_single( - parsable_dict=parsable_dict, - supported_types=supported_types)) - - return output_series - - @staticmethod - def parse_dynamic_single( - parsable_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> ParsableDynamic: - if "parsable_type" not in parsable_dict or not isinstance(parsable_dict["parsable_type"], str): - message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." 
- raise MCTParsingError(message) from None - - for supported_type in supported_types: - if parsable_dict["parsable_type"] == supported_type.parsable_type_identifier(): - request: ParsableDynamic - try: - request = supported_type(**parsable_dict) - except ValidationError as e: - raise MCTParsingError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None - return request - - message: str = "parsable_type did not match any expected value. Input is improperly formatted." - raise MCTParsingError(message) diff --git a/src/common/structures/target.py b/src/common/structures/pose_solver.py similarity index 80% rename from src/common/structures/target.py rename to src/common/structures/pose_solver.py index 3ac2833..386aabe 100644 --- a/src/common/structures/target.py +++ b/src/common/structures/pose_solver.py @@ -1,6 +1,10 @@ +from .linear_algebra import Pose import abc +import datetime +from enum import IntEnum import numpy from pydantic import BaseModel, Field, PrivateAttr +from typing import Final class Marker(BaseModel): @@ -88,3 +92,30 @@ def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: if marker_id not in self._marker_dict: raise IndexError(f"marker_id {marker_id} is not in target {self.target_id}") return self._marker_dict[marker_id].points + + +class PoseSolverStatus: + + class Solve(IntEnum): + STOPPED: Final[int] = 0 + RUNNING: Final[int] = 1 + FAILURE: Final[int] = 2 + + solve_status: Solve + solve_errors: list[str] + + def __init__(self): + self.solve_status = PoseSolverStatus.Solve.STOPPED + self.solve_errors = list() + + def in_runnable_state(self): + return self.solve_status == PoseSolverStatus.Solve.RUNNING + + +class PoseSolverFrame(BaseModel): + detector_poses: list[Pose] | None = Field() + target_poses: list[Pose] | None = Field() + timestamp_utc_iso8601: str = Field() + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/pose_solver_frame.py b/src/common/structures/pose_solver_frame.py deleted file mode 100644 index 54a5433..0000000 --- a/src/common/structures/pose_solver_frame.py +++ /dev/null @@ -1,12 +0,0 @@ -from .linear_algebra import Pose -import datetime -from pydantic import BaseModel, Field - - -class PoseSolverFrame(BaseModel): - detector_poses: list[Pose] | None = Field() - target_poses: list[Pose] | None = Field() - timestamp_utc_iso8601: str = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/pose_solver_status.py b/src/common/structures/pose_solver_status.py deleted file mode 100644 index d8bc09d..0000000 --- a/src/common/structures/pose_solver_status.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Final -from enum import IntEnum - - -class PoseSolverStatus: - - class Solve(IntEnum): - STOPPED: Final[int] = 0 - RUNNING: Final[int] = 1 - FAILURE: Final[int] = 2 - - solve_status: Solve - solve_errors: list[str] - - def __init__(self): - self.solve_status = PoseSolverStatus.Solve.STOPPED - self.solve_errors = list() - - def in_runnable_state(self): - return self.solve_status == PoseSolverStatus.Solve.RUNNING diff --git a/src/common/structures/key_value_structures.py b/src/common/structures/serialization.py similarity index 62% rename from src/common/structures/key_value_structures.py rename to src/common/structures/serialization.py index 8e8cf9e..4c199b2 100644 --- a/src/common/structures/key_value_structures.py +++ 
b/src/common/structures/serialization.py @@ -1,6 +1,7 @@ +from src.common.exceptions import MCTParsingError import abc -from pydantic import BaseModel, Field -from typing import Final, Literal, Union +from pydantic import BaseModel, Field, ValidationError +from typing import Final, Literal, TypeVar, Union class KeyValueSimpleAbstract(BaseModel, abc.ABC): @@ -133,3 +134,55 @@ def key_value_meta_to_simple( key_value_meta_list: list[KeyValueMetaAny] ) -> list[KeyValueSimpleAny]: return [key_value_meta.to_simple() for key_value_meta in key_value_meta_list] + + +ParsableDynamic = TypeVar('ParsableDynamic', bound='MCTParsable') + + +class MCTParsable(abc.ABC): + + @staticmethod + @abc.abstractmethod + def parsable_type_identifier() -> str: + pass + + @staticmethod + def parse_dynamic_series_list( + parsable_series_dict: dict, + supported_types: list[type[ParsableDynamic]] + ) -> list[ParsableDynamic]: + if "series" not in parsable_series_dict or not isinstance(parsable_series_dict["series"], list): + message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." + raise MCTParsingError(message) + + output_series: list[ParsableDynamic] = list() + for parsable_dict in parsable_series_dict["series"]: + if not isinstance(parsable_dict, dict): + message: str = "series contained a non-dict element. Input is improperly formatted." + raise MCTParsingError(message) + output_series.append(MCTParsable.parse_dynamic_single( + parsable_dict=parsable_dict, + supported_types=supported_types)) + + return output_series + + @staticmethod + def parse_dynamic_single( + parsable_dict: dict, + supported_types: list[type[ParsableDynamic]] + ) -> ParsableDynamic: + if "parsable_type" not in parsable_dict or not isinstance(parsable_dict["parsable_type"], str): + message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." + raise MCTParsingError(message) from None + + for supported_type in supported_types: + if parsable_dict["parsable_type"] == supported_type.parsable_type_identifier(): + request: ParsableDynamic + try: + request = supported_type(**parsable_dict) + except ValidationError as e: + raise MCTParsingError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None + return request + + message: str = "parsable_type did not match any expected value. Input is improperly formatted." 
+ raise MCTParsingError(message) diff --git a/src/common/structures/status_message.py b/src/common/structures/status_message.py deleted file mode 100644 index 76eff42..0000000 --- a/src/common/structures/status_message.py +++ /dev/null @@ -1,25 +0,0 @@ -import logging -from pydantic import BaseModel, Field -from typing import Final, Literal - - -SEVERITY_LABEL_DEBUG: Final[str] = "debug" -SEVERITY_LABEL_INFO: Final[str] = "info" -SEVERITY_LABEL_WARNING: Final[str] = "warning" -SEVERITY_LABEL_ERROR: Final[str] = "error" -SEVERITY_LABEL_CRITICAL: Final[str] = "critical" - -SeverityLabel = Literal["debug", "info", "warning", "error", "critical"] -SEVERITY_LABEL_TO_INT: Final[dict[SeverityLabel, int]] = { - "debug": logging.DEBUG, - "info": logging.INFO, - "warning": logging.WARNING, - "error": logging.ERROR, - "critical": logging.CRITICAL} - - -class StatusMessage(BaseModel): - source_label: str = Field() - severity: SeverityLabel = Field() - message: str - timestamp_utc_iso8601: str diff --git a/src/common/structures/vec3.py b/src/common/structures/vec3.py deleted file mode 100644 index 4445d4d..0000000 --- a/src/common/structures/vec3.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel, Field - - -class Vec3(BaseModel): - x: float = Field() - y: float = Field() - z: float = Field() diff --git a/src/common/util/__init__.py b/src/common/util/__init__.py index 42218d6..91954f4 100644 --- a/src/common/util/__init__.py +++ b/src/common/util/__init__.py @@ -1,2 +1,5 @@ +from .image_utils import ImageUtils from .io_utils import IOUtils from .math_utils import MathUtils +from .network_utils import NetworkUtils +from .python_utils import PythonUtils diff --git a/src/common/util/image_utils.py b/src/common/util/image_utils.py new file mode 100644 index 0000000..b4e39f1 --- /dev/null +++ b/src/common/util/image_utils.py @@ -0,0 +1,142 @@ +from src.common.structures import \ + CaptureFormat, \ + ImageResolution +import base64 +import cv2 +import logging +import numpy +from typing import Literal, Final + + +logger = logging.getLogger(__file__) + +ColorMode = Literal["color", "greyscale"] + + +class ImageUtils: + """ + A "class" to group related static functions, like in a namespace. + The class itself is not meant to be instantiated. 
+ """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + @staticmethod + def base64_to_image( + input_base64: str, + color_mode: ColorMode = "color" + ) -> numpy.ndarray: + """ + Assumes 8 bits per component + """ + + image_bytes: bytes = base64.b64decode(s=input_base64) + + color_flag: int = 0 + if color_mode == "color": + color_flag |= cv2.IMREAD_COLOR + elif color_mode == "greyscale": + color_flag |= cv2.IMREAD_GRAYSCALE + else: + logger.warning(f"Unsupported color mode specified: {color_mode}") + + opencv_image: numpy.ndarray = cv2.imdecode( + buf=numpy.frombuffer( + buffer=image_bytes, + dtype=numpy.uint8), + flags=color_flag) + return opencv_image + + @staticmethod + def black_image( + resolution_px: tuple[int, int], + ) -> numpy.ndarray: + return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) + + @staticmethod + def bytes_to_base64( + image_bytes: bytes + ) -> str: + return base64.b64encode(image_bytes).decode("ascii") + + @staticmethod + def image_resize_to_fit( + opencv_image: numpy.ndarray, + available_size: tuple[int, int] # x, y + ) -> numpy.ndarray: + # note: opencv height represented by 1st dimension + source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) + image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( + source_resolution_px=source_resolution_px, + available_size_px=available_size) + return cv2.resize( + src=opencv_image, + dsize=(image_width_px, image_height_px)) + + @staticmethod + def image_to_base64( + image_data: numpy.ndarray, + image_format: CaptureFormat = ".png", + ) -> str: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... + :return: base64 string representing the image + """ + encoded_image_rgb_bytes: bytes = ImageUtils.image_to_bytes( + image_data=image_data, + image_format=image_format) + encoded_image_rgb_base64: str = ImageUtils.bytes_to_base64(encoded_image_rgb_bytes) + return encoded_image_rgb_base64 + + @staticmethod + def image_to_bytes( + image_data: numpy.ndarray, + image_format: CaptureFormat = ".png", + ) -> bytes: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... 
+ :return: base64 string representing the image + """ + encoded_image_rgb_single_row: numpy.array + encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) + encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() + return encoded_image_rgb_bytes + + @staticmethod + def scale_factor_for_available_space_px( + source_resolution_px: tuple[int, int], + available_size_px: tuple[int, int] + ) -> tuple[int, int]: + source_width_px: int = source_resolution_px[0] + source_height_px: int = source_resolution_px[1] + available_width_px: int = available_size_px[0] + available_height_px: int = available_size_px[1] + scale: float = min( + available_width_px / float(source_width_px), + available_height_px / float(source_height_px)) + return int(round(source_width_px * scale)), int(round(source_height_px * scale)) + + class StandardResolutions: + RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) + RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) + RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) + RES_1024X768: Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) + RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) + RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) + RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=1024) + RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) + + @staticmethod + def as_list(): + return [ + ImageUtils.StandardResolutions.RES_640X360, + ImageUtils.StandardResolutions.RES_640X480, + ImageUtils.StandardResolutions.RES_800X600, + ImageUtils.StandardResolutions.RES_1024X768, + ImageUtils.StandardResolutions.RES_1280X720, + ImageUtils.StandardResolutions.RES_1280X800, + ImageUtils.StandardResolutions.RES_1280X1024, + ImageUtils.StandardResolutions.RES_1920X1080] diff --git a/src/common/util/network_utils.py b/src/common/util/network_utils.py new file mode 100644 index 0000000..0a7aee3 --- /dev/null +++ b/src/common/util/network_utils.py @@ -0,0 +1,17 @@ +from fastapi import Request, WebSocket + + +class NetworkUtils: + """ + A "class" to group related static functions, like in a namespace. + The class itself is not meant to be instantiated. + """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + @staticmethod + def client_identifier_from_connection( + connection: Request | WebSocket + ) -> str: + return f"{connection.client.host}:{connection.client.port}" diff --git a/src/common/util/python_utils.py b/src/common/util/python_utils.py new file mode 100644 index 0000000..8c0e799 --- /dev/null +++ b/src/common/util/python_utils.py @@ -0,0 +1,42 @@ +from typing import TypeVar + + +T = TypeVar("T") + + +class PythonUtils: + """ + A "class" to group related static functions, like in a namespace. + The class itself is not meant to be instantiated. 
+ """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + @staticmethod + def get_kwarg( + kwargs: dict, + key: str, + arg_type: type[T], + required: bool = True + ) -> T | None: + """ + :param kwargs: kwargs as a dict (without the "**") + :param key: key to search for + :param arg_type: expected type + :param required: If the keyword does not exist, then: + required == True -> Raise ValueError + required == False -> Return None + """ + + if key not in kwargs: + if required: + raise ValueError(f"Missing required key {key} in keyword arguments.") + return None + value: T = kwargs[key] + if not isinstance(value, arg_type): + raise ValueError( + f"Expected keyword argument {key} to be of type {arg_type.__name__}, " + f"but got {type(value).__name__}.") + return value + diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 51aa797..1ba0e61 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -290,9 +290,8 @@ def get_component_labels( None provided to `role` or `active` is treated as a wildcard (i.e. not filtered on that criteria). """ if role is not None: - valid_roles: list[str] = list(get_args(ComponentRoleLabel)) - if role not in valid_roles: - raise ValueError(f"role must be among the valid values {str(valid_roles)}") + if role not in ComponentRoleLabel: + raise ValueError(f"role must be among the valid values for ComponentRoleLabel") return_value: list[str] = list() for connection_label, connection in self._connections.items(): if role is not None and connection.get_role() != role: diff --git a/src/controller/structures/connection.py b/src/controller/structures/connection.py index 0dd90d9..c87efc2 100644 --- a/src/controller/structures/connection.py +++ b/src/controller/structures/connection.py @@ -7,11 +7,10 @@ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ - TimestampGetResponse -from src.common.structures import \ - MCTParsable, \ SeverityLabel, \ - StatusMessage + StatusMessage, \ + TimestampGetResponse +from src.common.structures import MCTParsable import abc import datetime from enum import StrEnum diff --git a/src/controller/structures/mct_component_address.py b/src/controller/structures/mct_component_address.py index 955d7d1..090256f 100644 --- a/src/controller/structures/mct_component_address.py +++ b/src/controller/structures/mct_component_address.py @@ -1,4 +1,4 @@ -from src.common.structures.component_role_label import ComponentRoleLabel +from src.common.structures.mct_component import ComponentRoleLabel from ipaddress import IPv4Address from pydantic import BaseModel, Field diff --git a/src/controller/structures/pose_solver_connection.py b/src/controller/structures/pose_solver_connection.py index 0dd6ea5..6b69231 100644 --- a/src/controller/structures/pose_solver_connection.py +++ b/src/controller/structures/pose_solver_connection.py @@ -1,4 +1,4 @@ -from src.common.structures.pose_solver_frame import PoseSolverFrame +from src.common.structures.pose_solver import PoseSolverFrame from .mct_component_address import MCTComponentAddress from .connection import Connection from src.common.api import \ diff --git a/src/detector/__init__.py b/src/detector/__init__.py index a14ed65..bc81790 100644 --- a/src/detector/__init__.py +++ b/src/detector/__init__.py @@ -1,4 +1,4 @@ -from .calibrator import Calibrator +from .intrinsic_calibrator import IntrinsicCalibrator from .detector import Detector from .interfaces import \ AbstractCamera, \ diff --git 
a/src/detector/detector.py b/src/detector/detector.py index c1c9967..9ee6ebb 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -33,7 +33,7 @@ MarkerParametersGetRequest, \ MarkerParametersGetResponse, \ MarkerParametersSetRequest -from .calibrator import Calibrator +from .intrinsic_calibrator import IntrinsicCalibrator from .exceptions import \ MCTDetectorRuntimeError from .interfaces import \ @@ -48,10 +48,10 @@ from src.common import \ EmptyResponse, \ ErrorResponse, \ - get_kwarg, \ MCTComponent, \ MCTRequest, \ - MCTResponse + MCTResponse, \ + PythonUtils from src.common.structures import \ DetectorFrame, \ ImageResolution, \ @@ -71,7 +71,7 @@ class Detector(MCTComponent): _detector_configuration: DetectorConfiguration - _calibrator: Calibrator + _calibrator: IntrinsicCalibrator _camera: AbstractCamera _marker: AbstractMarker @@ -88,7 +88,7 @@ def __init__( send_status_messages_to_logger=True) self._detector_configuration = detector_configuration - self._calibrator = Calibrator( + self._calibrator = IntrinsicCalibrator( configuration=detector_configuration.calibrator_configuration, status_message_source=self.get_status_message_source()) self._camera = camera_type( @@ -103,7 +103,7 @@ def __del__(self): self._camera.__del__() def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | ErrorResponse: - request: CalibrationCalculateRequest = get_kwarg( + request: CalibrationCalculateRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationCalculateRequest) @@ -137,7 +137,7 @@ def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | Erro return CalibrationImageAddResponse(image_identifier=image_identifier) def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | ErrorResponse: - request: CalibrationImageGetRequest = get_kwarg( + request: CalibrationImageGetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageGetRequest) @@ -149,7 +149,7 @@ def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | Error return CalibrationImageGetResponse(image_base64=image_base64) def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataListResponse | ErrorResponse: - request: CalibrationImageMetadataListRequest = get_kwarg( + request: CalibrationImageMetadataListRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageMetadataListRequest) @@ -162,7 +162,7 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationImageMetadataUpdateRequest = get_kwarg( + request: CalibrationImageMetadataUpdateRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageMetadataUpdateRequest) @@ -184,7 +184,7 @@ def calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListRes return CalibrationResolutionListResponse(resolutions=resolutions) def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | ErrorResponse: - request: CalibrationResultGetRequest = get_kwarg( + request: CalibrationResultGetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultGetRequest) @@ -205,7 +205,7 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive return 
CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadataListResponse | ErrorResponse: - request: CalibrationResultMetadataListRequest = get_kwarg( + request: CalibrationResultMetadataListRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultMetadataListRequest) @@ -218,7 +218,7 @@ def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadat return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationResultMetadataUpdateRequest = get_kwarg( + request: CalibrationResultMetadataUpdateRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultMetadataUpdateRequest) @@ -232,7 +232,7 @@ def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorR return EmptyResponse() def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: - request: CameraImageGetRequest = get_kwarg( + request: CameraImageGetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraImageGetRequest) @@ -256,7 +256,7 @@ def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | Erro return CameraParametersGetResponse(parameters=parameters) def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | ErrorResponse: - request: CameraParametersSetRequest = get_kwarg( + request: CameraParametersSetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraParametersSetRequest) @@ -277,7 +277,7 @@ def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | Erro return CameraResolutionGetResponse(resolution=image_resolution) def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorResponse: - request: DetectorFrameGetRequest = get_kwarg( + request: DetectorFrameGetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=DetectorFrameGetRequest) @@ -318,7 +318,7 @@ def marker_parameters_get(self, **_kwargs) -> MarkerParametersGetResponse | Erro return MarkerParametersGetResponse(parameters=parameters) def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: MarkerParametersSetRequest = get_kwarg( + request: MarkerParametersSetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=MarkerParametersSetRequest) diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index 5e3ff72..088cd2e 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -1,7 +1,7 @@ from src.common import \ - client_identifier_from_connection, \ EmptyResponse, \ ErrorResponse, \ + NetworkUtils, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ @@ -83,14 +83,14 @@ def create_app() -> FastAPI: async def detector_start( http_request: Request ) -> None: - client_identifier: str = client_identifier_from_connection(connection=http_request) + client_identifier: str = NetworkUtils.client_identifier_from_connection(connection=http_request) detector.detector_start(client_identifier=client_identifier) @detector_app.head("/detector/stop") async def detector_stop( http_request: Request ) -> None: - client_identifier: str = client_identifier_from_connection(connection=http_request) + client_identifier: str = NetworkUtils.client_identifier_from_connection(connection=http_request) 
detector.detector_stop(client_identifier=client_identifier) @detector_app.post("/detector/start_time_sync") diff --git a/src/detector/implementations/camera_opencv_capture_device.py b/src/detector/implementations/camera_opencv_capture_device.py index 59639ce..0b361f7 100644 --- a/src/detector/implementations/camera_opencv_capture_device.py +++ b/src/detector/implementations/camera_opencv_capture_device.py @@ -4,7 +4,7 @@ CameraConfiguration, \ CameraStatus from src.common import \ - StandardResolutions, \ + ImageUtils, \ StatusMessageSource from src.common.structures import \ ImageResolution, \ @@ -35,7 +35,8 @@ # This list is by no means exhaustive, but it should probably # capture a reasonable cross-section of commonly-used camera image resolutions. # Ideally we can query the camera/driver for supported resolutions and use that instead of this list. -_CAMERA_RESOLUTION_ALLOWABLE: Final[list[str]] = [str(resolution) for resolution in StandardResolutions.as_list()] +_CAMERA_RESOLUTION_ALLOWABLE: Final[list[str]] = [ + str(resolution) for resolution in ImageUtils.StandardResolutions.as_list()] _CAMERA_FPS_KEY: Final[str] = "FramesPerSecond" _CAMERA_FPS_DEFAULT: Final[float] = 30.0 _CAMERA_FPS_RANGE_MINIMUM: Final[float] = 1.0 diff --git a/src/detector/interfaces/abstract_camera.py b/src/detector/interfaces/abstract_camera.py index 7df2fc0..6bb9c1b 100644 --- a/src/detector/interfaces/abstract_camera.py +++ b/src/detector/interfaces/abstract_camera.py @@ -1,13 +1,14 @@ from ..structures import \ CameraConfiguration, \ CameraStatus -from src.common import StatusMessageSource +from src.common import \ + SeverityLabel, \ + StatusMessageSource from src.common.structures import \ CaptureFormat, \ ImageResolution, \ KeyValueSimpleAny, \ - KeyValueMetaAbstract, \ - SeverityLabel + KeyValueMetaAbstract import abc import base64 import cv2 diff --git a/src/detector/interfaces/abstract_marker.py b/src/detector/interfaces/abstract_marker.py index 879de65..fd813f7 100644 --- a/src/detector/interfaces/abstract_marker.py +++ b/src/detector/interfaces/abstract_marker.py @@ -1,10 +1,11 @@ from ..structures import \ MarkerConfiguration, \ MarkerStatus -from src.common import StatusMessageSource +from src.common import \ + SeverityLabel, \ + StatusMessageSource from src.common.structures import \ MarkerSnapshot, \ - SeverityLabel, \ KeyValueMetaAny, \ KeyValueSimpleAny import abc diff --git a/src/detector/calibrator.py b/src/detector/intrinsic_calibrator.py similarity index 98% rename from src/detector/calibrator.py rename to src/detector/intrinsic_calibrator.py index 94d7939..ea8944f 100644 --- a/src/detector/calibrator.py +++ b/src/detector/intrinsic_calibrator.py @@ -10,7 +10,8 @@ CalibrationResultState from .util import assign_key_value_list_to_aruco_detection_parameters from src.common import \ - ImageCoding, \ + ImageUtils, \ + IOUtils, \ StatusMessageSource from src.common.structures import \ CharucoBoardSpecification, \ @@ -20,8 +21,6 @@ IntrinsicParameters, \ KeyValueSimpleAny, \ Vec3 -from src.common.util import \ - IOUtils import cv2 import cv2.aruco import datetime @@ -38,7 +37,7 @@ logger = logging.getLogger(__name__) -class Calibrator: +class IntrinsicCalibrator: _configuration: CalibratorConfiguration _calibration_map: dict[ImageResolution, CalibrationMapValue] @@ -75,7 +74,7 @@ def add_image( self, image_base64: str ) -> str: # id of image - image_data: numpy.ndarray = ImageCoding.base64_to_image(input_base64=image_base64, color_mode="color") + image_data: numpy.ndarray = 
ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") map_key: ImageResolution = ImageResolution(x_px=image_data.shape[1], y_px=image_data.shape[0]) # Before making any changes to the calibration map, make sure folders exist, # and that this file does not somehow already exist (highly unlikely) @@ -95,7 +94,7 @@ def add_image( self._calibration_map[map_key].image_metadata_list.append( CalibrationImageMetadata(identifier=image_identifier)) # noinspection PyTypeChecker - image_bytes = ImageCoding.image_to_bytes(image_data=image_data, image_format=Calibrator.IMAGE_FORMAT) + image_bytes = ImageUtils.image_to_bytes(image_data=image_data, image_format=IntrinsicCalibrator.IMAGE_FORMAT) with (open(image_filepath, 'wb') as in_file): in_file.write(image_bytes) self.save() @@ -349,7 +348,7 @@ def get_image( raise MCTDetectorRuntimeError( message=f"Failed to open image {image_identifier} for " f"given resolution {str(matching_image_resolution)}.") - image_base64 = ImageCoding.bytes_to_base64(image_bytes=image_bytes) + image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) return image_base64 # noinspection DuplicatedCode @@ -465,7 +464,7 @@ def _image_filepath( key_path: str = self._path_for_map_key(map_key=map_key) return os.path.join( key_path, - image_identifier + Calibrator.IMAGE_FORMAT) + image_identifier + IntrinsicCalibrator.IMAGE_FORMAT) def list_resolutions(self) -> list[ImageResolution]: resolutions: list[ImageResolution] = list(self._calibration_map.keys()) @@ -534,7 +533,7 @@ def load(self) -> bool: return True def _map_filepath(self) -> str: - return os.path.join(self._configuration.data_path, Calibrator.CALIBRATION_MAP_FILENAME) + return os.path.join(self._configuration.data_path, IntrinsicCalibrator.CALIBRATION_MAP_FILENAME) def _path_for_map_key( self, @@ -550,7 +549,7 @@ def _result_filepath( key_path: str = self._path_for_map_key(map_key=map_key) return os.path.join( key_path, - result_identifier + Calibrator.RESULT_FORMAT) + result_identifier + IntrinsicCalibrator.RESULT_FORMAT) def save(self) -> None: IOUtils.json_write( diff --git a/src/detector/structures/__init__.py b/src/detector/structures/__init__.py index 19efb74..8d5f961 100644 --- a/src/detector/structures/__init__.py +++ b/src/detector/structures/__init__.py @@ -1,13 +1,16 @@ -from .calibration_configuration import CalibratorConfiguration -from .calibration_image_metadata import CalibrationImageMetadata -from .calibration_image_state import CalibrationImageState -from .calibration_map import CalibrationMap -from .calibration_map_entry import CalibrationMapEntry -from .calibration_map_value import CalibrationMapValue -from .calibration_result_metadata import CalibrationResultMetadata -from .calibration_result_state import CalibrationResultState -from .camera_configuration import CameraConfiguration -from .camera_status import CameraStatus -from .detector_configuration import DetectorConfiguration -from .marker_status import MarkerStatus -from .marker_configuration import MarkerConfiguration +from .calibration_map import \ + CalibrationImageMetadata, \ + CalibrationImageState, \ + CalibrationMap, \ + CalibrationMapEntry, \ + CalibrationMapValue, \ + CalibrationResultMetadata, \ + CalibrationResultState +from .detector_configuration import \ + DetectorConfiguration, \ + CalibratorConfiguration, \ + CameraConfiguration, \ + MarkerConfiguration +from .status import \ + CameraStatus, \ + MarkerStatus diff --git a/src/detector/structures/calibration_configuration.py 
b/src/detector/structures/calibration_configuration.py deleted file mode 100644 index 95bb8dd..0000000 --- a/src/detector/structures/calibration_configuration.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import BaseModel, Field - - -class CalibratorConfiguration(BaseModel): - data_path: str = Field() diff --git a/src/detector/structures/calibration_image_metadata.py b/src/detector/structures/calibration_image_metadata.py deleted file mode 100644 index d3c2a04..0000000 --- a/src/detector/structures/calibration_image_metadata.py +++ /dev/null @@ -1,10 +0,0 @@ -from .calibration_image_state import CalibrationImageState -import datetime -from pydantic import BaseModel, Field - - -class CalibrationImageMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - state: CalibrationImageState = Field(default=CalibrationImageState.SELECT) diff --git a/src/detector/structures/calibration_image_state.py b/src/detector/structures/calibration_image_state.py deleted file mode 100644 index 58218e1..0000000 --- a/src/detector/structures/calibration_image_state.py +++ /dev/null @@ -1,8 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class CalibrationImageState(StrEnum): - IGNORE: Final[int] = "ignore" - SELECT: Final[int] = "select" - DELETE: Final[int] = "delete" # stage for deletion diff --git a/src/detector/structures/calibration_map.py b/src/detector/structures/calibration_map.py index 4e96e02..4aebaed 100644 --- a/src/detector/structures/calibration_map.py +++ b/src/detector/structures/calibration_map.py @@ -1,7 +1,54 @@ -from .calibration_map_entry import CalibrationMapEntry -from .calibration_map_value import CalibrationMapValue from src.common.structures import ImageResolution +import datetime +from enum import StrEnum from pydantic import BaseModel, Field +from typing import Final + + +class CalibrationImageState(StrEnum): + IGNORE: Final[int] = "ignore" + SELECT: Final[int] = "select" + DELETE: Final[int] = "delete" # stage for deletion + + +class CalibrationImageMetadata(BaseModel): + identifier: str = Field() + label: str = Field(default_factory=str) + timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + state: CalibrationImageState = Field(default=CalibrationImageState.SELECT) + + +class CalibrationResultState(StrEnum): + # indicate to use this calibration (as opposed to simply storing it) + # normally there shall only ever be one ACTIVE calibration for a given image resolution + ACTIVE: Final[str] = "active" + + # store the calibration, but don't mark it for use + RETAIN: Final[str] = "retain" + + # stage for deletion + DELETE: Final[str] = "delete" + + +class CalibrationResultMetadata(BaseModel): + identifier: str = Field() + label: str = Field(default_factory=str) + timestamp_utc_iso8601: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + image_identifiers: list[str] = Field(default_factory=list) + state: CalibrationResultState = Field(default=CalibrationResultState.RETAIN) + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class CalibrationMapValue(BaseModel): + image_metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) + result_metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) + + +class 
CalibrationMapEntry(BaseModel): + key: ImageResolution = Field() + value: CalibrationMapValue = Field() class CalibrationMap(BaseModel): diff --git a/src/detector/structures/calibration_map_entry.py b/src/detector/structures/calibration_map_entry.py deleted file mode 100644 index b04c1af..0000000 --- a/src/detector/structures/calibration_map_entry.py +++ /dev/null @@ -1,8 +0,0 @@ -from .calibration_map_value import CalibrationMapValue -from src.common.structures import ImageResolution -from pydantic import BaseModel, Field - - -class CalibrationMapEntry(BaseModel): - key: ImageResolution = Field() - value: CalibrationMapValue = Field() diff --git a/src/detector/structures/calibration_map_value.py b/src/detector/structures/calibration_map_value.py deleted file mode 100644 index 77159fd..0000000 --- a/src/detector/structures/calibration_map_value.py +++ /dev/null @@ -1,8 +0,0 @@ -from .calibration_image_metadata import CalibrationImageMetadata -from .calibration_result_metadata import CalibrationResultMetadata -from pydantic import BaseModel, Field - - -class CalibrationMapValue(BaseModel): - image_metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) - result_metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) diff --git a/src/detector/structures/calibration_result_metadata.py b/src/detector/structures/calibration_result_metadata.py deleted file mode 100644 index cef3265..0000000 --- a/src/detector/structures/calibration_result_metadata.py +++ /dev/null @@ -1,14 +0,0 @@ -from .calibration_result_state import CalibrationResultState -import datetime -from pydantic import BaseModel, Field - - -class CalibrationResultMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc_iso8601: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - image_identifiers: list[str] = Field(default_factory=list) - state: CalibrationResultState = Field(default=CalibrationResultState.RETAIN) - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/detector/structures/calibration_result_state.py b/src/detector/structures/calibration_result_state.py deleted file mode 100644 index 84684fa..0000000 --- a/src/detector/structures/calibration_result_state.py +++ /dev/null @@ -1,14 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class CalibrationResultState(StrEnum): - # indicate to use this calibration (as opposed to simply storing it) - # normally there shall only ever be one ACTIVE calibration for a given image resolution - ACTIVE: Final[str] = "active" - - # store the calibration, but don't mark it for use - RETAIN: Final[str] = "retain" - - # stage for deletion - DELETE: Final[str] = "delete" diff --git a/src/detector/structures/camera_configuration.py b/src/detector/structures/camera_configuration.py deleted file mode 100644 index 453fb80..0000000 --- a/src/detector/structures/camera_configuration.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Union - - -class CameraConfiguration(BaseModel): - driver: str = Field() - capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) diff --git a/src/detector/structures/detector_configuration.py b/src/detector/structures/detector_configuration.py index d55845c..a4537f3 100644 --- a/src/detector/structures/detector_configuration.py +++ 
b/src/detector/structures/detector_configuration.py @@ -1,7 +1,18 @@ -from .calibration_configuration import CalibratorConfiguration -from .camera_configuration import CameraConfiguration -from .marker_configuration import MarkerConfiguration from pydantic import BaseModel, Field +from typing import Union + + +class CalibratorConfiguration(BaseModel): + data_path: str = Field() + + +class CameraConfiguration(BaseModel): + driver: str = Field() + capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) + + +class MarkerConfiguration(BaseModel): + method: str = Field() class DetectorConfiguration(BaseModel): diff --git a/src/detector/structures/marker_configuration.py b/src/detector/structures/marker_configuration.py deleted file mode 100644 index 908afd1..0000000 --- a/src/detector/structures/marker_configuration.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import BaseModel, Field - - -class MarkerConfiguration(BaseModel): - method: str = Field() diff --git a/src/detector/structures/marker_status.py b/src/detector/structures/marker_status.py deleted file mode 100644 index 9387970..0000000 --- a/src/detector/structures/marker_status.py +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Final -from enum import StrEnum - - -class MarkerStatus(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" diff --git a/src/detector/structures/camera_status.py b/src/detector/structures/status.py similarity index 65% rename from src/detector/structures/camera_status.py rename to src/detector/structures/status.py index 5cd6c87..760712f 100644 --- a/src/detector/structures/camera_status.py +++ b/src/detector/structures/status.py @@ -9,3 +9,9 @@ class CameraStatus(StrEnum): def in_runnable_state(self): return self == CameraStatus.RUNNING + + +class MarkerStatus(StrEnum): + STOPPED: Final[int] = "STOPPED" + RUNNING: Final[int] = "RUNNING" + FAILURE: Final[int] = "FAILURE" diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index 3dae97a..699ef96 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -1,45 +1,46 @@ -from io import BytesIO -import platform -import uuid +from .base_panel import BasePanel +from .feedback import ImagePanel +from .parameters import \ + ParameterSpinboxFloat, \ + ParameterCheckbox, \ + ParameterText +from .pose_solver_panel import POSE_REPRESENTATIVE_MODEL +from .specialized import GraphicsRenderer +from src.board_builder import BoardBuilder +from src.common.api import \ + EmptyResponse, \ + ErrorResponse, \ + MCTRequestSeries, \ + MCTResponse, \ + MCTResponseSeries +from src.common import \ + ImageUtils, \ + StatusMessageSource +from src.common.structures import \ + DetectorFrame, \ + ImageResolution, \ + MarkerSnapshot, \ + Matrix4x4, \ + PoseSolverFrame, \ + Pose +from src.controller import MCTController +from src.detector.api import \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + CalibrationResultGetActiveResponse +from src.gui.panels.detector_panel import _CAPTURE_FORMAT import cv2 +import datetime +from io import BytesIO import logging -from typing import Final import numpy +import os +import platform +from typing import Final +import uuid import wx import wx.grid -import datetime -import os -from src.common.api.empty_response import EmptyResponse -from src.common.api.error_response import ErrorResponse -from src.common.api.mct_request_series import MCTRequestSeries -from 
src.common.api.mct_response import MCTResponse -from src.common.api.mct_response_series import MCTResponseSeries -from src.common.image_coding import ImageCoding -from src.common.image_utils import ImageUtils -from src.common.standard_resolutions import StandardResolutions -from src.common.structures.detector_frame import DetectorFrame -from src.common.structures.image_resolution import ImageResolution -from src.common.structures.marker_snapshot import MarkerSnapshot -from src.detector.api import \ - CameraImageGetRequest, \ - CalibrationResultGetActiveResponse -from src.detector.api import CameraImageGetResponse -from src.gui.panels.detector_panel import _CAPTURE_FORMAT - -from .base_panel import BasePanel -from .feedback import ImagePanel -from .parameters import ParameterSpinboxFloat, ParameterCheckbox, ParameterText - -from src.board_builder import BoardBuilder -from src.common.structures import PoseSolverFrame, Pose, Matrix4x4 -from src.controller import MCTController -from src.common import ( - StatusMessageSource -) -from .pose_solver_panel import POSE_REPRESENTATIVE_MODEL -from .specialized import \ - GraphicsRenderer logger = logging.getLogger(__name__) @@ -540,13 +541,13 @@ def _on_reset_button_click(self, event: wx.CommandEvent) -> None: def _process_frame(self, preview: LiveDetectorPreview): # TODO: The Detector should tell us the resolution of the image it operated on. - resolution_str: str = str(StandardResolutions.RES_1280X720) + resolution_str: str = str(ImageUtils.StandardResolutions.RES_1280X720) image_panel = preview.image_panel display_image: numpy.ndarray scale: float | None if self._preview_image_checkbox.checkbox.GetValue() and preview.image is not None: - opencv_image: numpy.ndarray = ImageCoding.base64_to_image(input_base64=preview.image) + opencv_image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=preview.image) display_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=image_panel.GetSize()) @@ -571,7 +572,7 @@ def _process_frame(self, preview: LiveDetectorPreview): if self._annotate_rejected_checkbox.checkbox.GetValue(): self._draw_all_corners(preview.detector_frame.rejected_marker_snapshots, scale, display_image, [127, 191, 255]) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=display_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index 67651f9..c828654 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -11,7 +11,6 @@ from src.common import \ ErrorResponse, \ EmptyResponse, \ - ImageCoding, \ ImageUtils, \ MCTRequestSeries, \ MCTResponse, \ @@ -344,11 +343,11 @@ def _handle_response_get_calibration_image( self, response: CalibrationImageGetResponse ) -> None: - opencv_image = ImageCoding.base64_to_image(input_base64=response.image_base64) + opencv_image = ImageUtils.base64_to_image(input_base64=response.image_base64) opencv_image = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=self._image_panel.GetSize()) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=opencv_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=opencv_image, image_format=".jpg") image_buffer_io: BytesIO = 
BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() diff --git a/src/gui/panels/controller_panel.py b/src/gui/panels/controller_panel.py index 9344cb7..f172b8f 100644 --- a/src/gui/panels/controller_panel.py +++ b/src/gui/panels/controller_panel.py @@ -4,9 +4,8 @@ LogPanel from src.common import \ DequeueStatusMessagesResponse, \ + StatusMessage, \ StatusMessageSource -from src.common.structures import \ - StatusMessage from src.controller import \ MCTController, \ ConnectionReport diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index bb44b96..55e9bbd 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -10,12 +10,10 @@ from src.common import \ ErrorResponse, \ EmptyResponse, \ - ImageCoding, \ ImageUtils, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ - StandardResolutions, \ StatusMessageSource from src.common.structures import \ CaptureFormat, \ @@ -50,9 +48,9 @@ _UPDATE_INTERVAL_MILLISECONDS: Final[int] = 16 _SUPPORTED_RESOLUTIONS: Final[list[ImageResolution]] = [ - StandardResolutions.RES_640X480, - StandardResolutions.RES_1280X720, - StandardResolutions.RES_1920X1080] + ImageUtils.StandardResolutions.RES_640X480, + ImageUtils.StandardResolutions.RES_1280X720, + ImageUtils.StandardResolutions.RES_1920X1080] _SUPPORTED_FPS: Final[list[str]] = [ "15", "30", @@ -521,7 +519,7 @@ def _update_ui_image(self): else: scale: float | None if self._live_preview_image_base64 is not None: - opencv_image: numpy.ndarray = ImageCoding.base64_to_image(input_base64=self._live_preview_image_base64) + opencv_image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=self._live_preview_image_base64) display_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=self._image_panel.GetSize()) @@ -556,7 +554,7 @@ def _update_ui_image(self): color=[127, 191, 255], # orange thickness=2) - image_buffer: bytes = ImageCoding.image_to_bytes(image_data=display_image, image_format=".jpg") + image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) wx_image: wx.Image = wx.Image(image_buffer_io) wx_bitmap: wx.Bitmap = wx_image.ConvertToBitmap() diff --git a/src/gui/panels/specialized/log_panel.py b/src/gui/panels/specialized/log_panel.py index 5f18c49..3b2bf32 100644 --- a/src/gui/panels/specialized/log_panel.py +++ b/src/gui/panels/specialized/log_panel.py @@ -1,4 +1,4 @@ -from src.common.structures import StatusMessage +from src.common import StatusMessage import wx import wx.grid diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 60372bb..6903eeb 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -4,6 +4,7 @@ DetectorRecord, \ DetectorFrameRecord, \ PoseSolverParameters +from src.common import MathUtils from src.common.structures import \ DetectorFrame, \ IntrinsicParameters, \ @@ -12,7 +13,6 @@ Pose, \ Ray, \ TargetBase -from src.common.util import MathUtils import cv2 import cv2.aruco import datetime diff --git a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py index 2886601..806ffd5 100644 --- a/src/pose_solver/pose_solver_api.py +++ b/src/pose_solver/pose_solver_api.py @@ -16,10 +16,10 @@ from src.common import \ EmptyResponse, \ ErrorResponse, \ - get_kwarg, \ MCTComponent, \ MCTRequest, \ - MCTResponse + MCTResponse, \ + PythonUtils from 
src.common.structures import \ Pose, \ PoseSolverStatus @@ -49,7 +49,7 @@ def __init__( self._status = PoseSolverStatus() def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddDetectorFrameRequest = get_kwarg( + request: PoseSolverAddDetectorFrameRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverAddDetectorFrameRequest) @@ -62,7 +62,7 @@ def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddTargetMarkerRequest = get_kwarg( + request: PoseSolverAddTargetMarkerRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverAddTargetMarkerRequest) @@ -84,7 +84,7 @@ def get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: target_poses=target_poses) def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetExtrinsicRequest = get_kwarg( + request: PoseSolverSetExtrinsicRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetExtrinsicRequest) @@ -97,7 +97,7 @@ def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetIntrinsicRequest = get_kwarg( + request: PoseSolverSetIntrinsicRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetIntrinsicRequest) @@ -110,7 +110,7 @@ def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetReferenceRequest = get_kwarg( + request: PoseSolverSetReferenceRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetReferenceRequest) @@ -121,7 +121,7 @@ def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetTargetsRequest = get_kwarg( + request: PoseSolverSetTargetsRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetTargetsRequest) diff --git a/src/slicer_connection.py b/src/slicer_connection.py index 5888790..ca7901a 100644 --- a/src/slicer_connection.py +++ b/src/slicer_connection.py @@ -7,8 +7,8 @@ import logging from src.common.api.mct_request_series import MCTRequestSeries -from src.common.structures.component_role_label import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER -from src.common.structures.target import TargetBase +from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER +from src.common.structures import TargetBase from src.controller.mct_controller import MCTController from src.controller.structures.mct_component_address import MCTComponentAddress from ipaddress import IPv4Address diff --git a/src/util/generate_target_definition_from_charuco.py b/src/util/generate_target_definition_from_charuco.py index 7f3c354..9669876 100644 --- a/src/util/generate_target_definition_from_charuco.py +++ b/src/util/generate_target_definition_from_charuco.py @@ -1,18 +1,18 @@ from src.common.structures import \ CharucoBoardSpecification, \ TargetBoard -from src.common.structures.target import _Marker +from src.common.structures import Marker board: CharucoBoardSpecification = 
CharucoBoardSpecification() points: list[list[float]] = board.get_marker_corner_points() -markers: list[_Marker] = list() +markers: list[Marker] = list() POINTS_PER_MARKER: int = 4 marker_count: int = round(int(len(points) / POINTS_PER_MARKER)) for marker_index in range(marker_count): point_start_index: int = marker_index * POINTS_PER_MARKER marker_points = points[point_start_index: point_start_index + POINTS_PER_MARKER] - markers.append(_Marker( + markers.append(Marker( marker_id=f"{marker_index}", points=marker_points)) target: TargetBoard = TargetBoard( diff --git a/src/util/measure_detector_to_reference.py b/src/util/measure_detector_to_reference.py index de29a79..fac284b 100644 --- a/src/util/measure_detector_to_reference.py +++ b/src/util/measure_detector_to_reference.py @@ -11,7 +11,7 @@ from scipy.spatial.transform import Rotation as R from src.board_builder.board_builder import BoardBuilder -from src.common.structures.component_role_label import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER +from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER from src.controller.mct_controller import MCTController from src.controller.structures.mct_component_address import MCTComponentAddress from src.pose_solver.util import average_quaternion, average_vector diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 5eb4206..e00621a 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -2,7 +2,7 @@ import numpy import os import re -from src.common import ImageCoding, StatusMessageSource +from src.common import ImageUtils, StatusMessageSource from src.common.structures import \ CORNER_REFINEMENT_METHOD_SUBPIX, \ ImageResolution, \ @@ -10,7 +10,7 @@ KeyValueSimpleAny, \ KeyValueSimpleString, \ MarkerSnapshot -from src.detector import Calibrator +from src.detector import IntrinsicCalibrator from src.detector.implementations.marker_aruco_opencv import ArucoOpenCVMarker from src.detector.structures import CalibratorConfiguration from src.detector.util import KEY_CORNER_REFINEMENT_METHOD @@ -68,7 +68,7 @@ def test(self): # We'll use all images from the A# and B# sets of frames. 
calibration_result: IntrinsicCalibration | None = None with TemporaryDirectory() as temppath: - calibrator: Calibrator = Calibrator( + calibrator: IntrinsicCalibrator = IntrinsicCalibrator( configuration=CalibratorConfiguration(data_path=temppath), status_message_source=status_message_source) for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): @@ -76,7 +76,7 @@ def test(self): if not frame_id.startswith("A") and not frame_id.startswith("B"): continue image: numpy.ndarray = cv2.imread(image_filepath) - image_base64: str = ImageCoding.image_to_base64(image) + image_base64: str = ImageUtils.image_to_base64(image) calibrator.add_image(image_base64) _, calibration_result = calibrator.calculate( image_resolution=IMAGE_RESOLUTION, diff --git a/test/test_math_utils.py b/test/test_math_utils.py index 6469826..4b86ba0 100644 --- a/test/test_math_utils.py +++ b/test/test_math_utils.py @@ -1,4 +1,4 @@ -from src.common.util import MathUtils +from src.common import MathUtils from src.common.structures import \ IterativeClosestPointParameters, \ Ray From 75f6f2a228cd71ff4ca3c4bf267b9e44acf700d5 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 10 Jul 2025 11:18:31 -0400 Subject: [PATCH 05/33] WIP: Further consolidation, this time in controller --- src/board_builder/board_builder.py | 5 +- src/board_builder/structures/__init__.py | 1 + .../utils/board_builder_pose_solver.py | 2 +- src/common/structures/__init__.py | 1 - src/common/structures/detector.py | 20 -- src/common/structures/pose_solver.py | 3 - src/controller/__init__.py | 13 +- .../mct_configuration.py => configuration.py} | 9 +- src/controller/{structures => }/connection.py | 262 +++++++++++++++++- ...e_series_not_expected.py => exceptions.py} | 0 src/controller/exceptions/__init__.py | 1 - src/controller/mct_controller.py | 21 +- src/controller/structures/__init__.py | 9 - .../structures/connection_report.py | 22 -- .../structures/detector_connection.py | 122 -------- .../structures/mct_component_address.py | 14 - .../structures/pose_solver_connection.py | 94 ------- src/controller/structures/startup_mode.py | 7 - src/detector/structures/status.py | 3 - src/gui/panels/controller_panel.py | 8 +- .../panels/specialized/connection_table.py | 7 +- src/slicer_connection.py | 5 +- src/util/measure_detector_to_reference.py | 20 +- 23 files changed, 304 insertions(+), 345 deletions(-) rename src/controller/{structures/mct_configuration.py => configuration.py} (83%) rename src/controller/{structures => }/connection.py (67%) rename src/controller/{exceptions/response_series_not_expected.py => exceptions.py} (100%) delete mode 100644 src/controller/exceptions/__init__.py delete mode 100644 src/controller/structures/__init__.py delete mode 100644 src/controller/structures/connection_report.py delete mode 100644 src/controller/structures/detector_connection.py delete mode 100644 src/controller/structures/mct_component_address.py delete mode 100644 src/controller/structures/pose_solver_connection.py delete mode 100644 src/controller/structures/startup_mode.py diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 1ae08d0..7a7bf34 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -7,13 +7,14 @@ from typing import Final from .utils import BoardBuilderPoseSolver -from .structures import PoseLocation -from src.common.structures import Pose, MarkerSnapshot, MarkerCorners, Matrix4x4 +from .structures import PoseLocation, MarkerCorners +from 
src.common.structures import Pose, MarkerSnapshot, Matrix4x4 from src.common.structures import Marker, TargetBoard _HOMOGENEOUS_POINT_COORD: Final[int] = 4 TESTED_BOARD_NAME: str = 'top_data.json' # If collecting data for repeatability test, specify the file name. cube_data.json, planar_data.json, top_data.json + class BoardBuilder: _detector_poses_median: dict[str, PoseLocation] _detector_poses: list[Pose] diff --git a/src/board_builder/structures/__init__.py b/src/board_builder/structures/__init__.py index daa65e3..563af0d 100644 --- a/src/board_builder/structures/__init__.py +++ b/src/board_builder/structures/__init__.py @@ -1,3 +1,4 @@ +from .marker_corners import MarkerCorners from .marker_ray_set import MarkerRaySet from .pose_data import PoseData from .pose_location import PoseLocation diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index 5d3ef53..396418d 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -1,4 +1,5 @@ from src.board_builder.structures import \ + MarkerCorners, \ MarkerRaySet, \ PoseData, \ PoseLocation @@ -7,7 +8,6 @@ CharucoBoardSpecification, \ IntrinsicParameters, \ IterativeClosestPointParameters, \ - MarkerCorners, \ Matrix4x4, \ Pose, \ Ray, \ diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index b2549b0..684ab26 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -11,7 +11,6 @@ from .detector import \ CaptureFormat, \ DetectorFrame, \ - MarkerCorners, \ MarkerCornerImagePoint, \ MarkerSnapshot from .image import \ diff --git a/src/common/structures/detector.py b/src/common/structures/detector.py index a46962a..adb5ade 100644 --- a/src/common/structures/detector.py +++ b/src/common/structures/detector.py @@ -29,23 +29,3 @@ class DetectorFrame(BaseModel): def timestamp_utc(self): return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) - - -class MarkerCorners: - # TODO: Remove this class in favour of DetectorFrame - detector_label: str - marker_id: int - points: list[list[float]] - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp diff --git a/src/common/structures/pose_solver.py b/src/common/structures/pose_solver.py index 386aabe..79471a6 100644 --- a/src/common/structures/pose_solver.py +++ b/src/common/structures/pose_solver.py @@ -108,9 +108,6 @@ def __init__(self): self.solve_status = PoseSolverStatus.Solve.STOPPED self.solve_errors = list() - def in_runnable_state(self): - return self.solve_status == PoseSolverStatus.Solve.RUNNING - class PoseSolverFrame(BaseModel): detector_poses: list[Pose] | None = Field() diff --git a/src/controller/__init__.py b/src/controller/__init__.py index 56390e1..1e10a09 100644 --- a/src/controller/__init__.py +++ b/src/controller/__init__.py @@ -1,5 +1,10 @@ -from .mct_controller import MCTController -from .structures import \ - MCTComponentAddress, \ +from .connection import \ Connection, \ - ConnectionReport + DetectorConnection, \ + PoseSolverConnection +from .configuration import \ + MCTComponentConfig, \ + MCTConfiguration, \ + StartupMode +from .exceptions import ResponseSeriesNotExpected +from .mct_controller import MCTController diff --git 
a/src/controller/structures/mct_configuration.py b/src/controller/configuration.py similarity index 83% rename from src/controller/structures/mct_configuration.py rename to src/controller/configuration.py index 3e26675..50ece83 100644 --- a/src/controller/structures/mct_configuration.py +++ b/src/controller/configuration.py @@ -1,11 +1,16 @@ -from .startup_mode import StartupMode from src.common.structures import \ KeyValueSimpleAny, \ Matrix4x4, \ TargetBoard, \ TargetMarker +from enum import StrEnum from pydantic import BaseModel, Field, SerializeAsAny -from typing import Union +from typing import Final, Union + + +class StartupMode(StrEnum): + DETECTING_ONLY: Final[str] = "detecting_only" + DETECTING_AND_SOLVING: Final[str] = "detecting_and_solving" class MCTComponentConfig(BaseModel): diff --git a/src/controller/structures/connection.py b/src/controller/connection.py similarity index 67% rename from src/controller/structures/connection.py rename to src/controller/connection.py index c87efc2..d9e5e9d 100644 --- a/src/controller/structures/connection.py +++ b/src/controller/connection.py @@ -1,19 +1,53 @@ -from .mct_component_address import MCTComponentAddress -from .connection_report import ConnectionReport from src.common import \ DequeueStatusMessagesResponse, \ EmptyResponse, \ ErrorResponse, \ + MCTRequest, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ SeverityLabel, \ StatusMessage, \ TimestampGetResponse -from src.common.structures import MCTParsable +from src.common.structures import \ + ComponentRoleLabel, \ + DetectorFrame, \ + ImageResolution, \ + IntrinsicParameters, \ + KeyValueSimpleAny, \ + Matrix4x4, \ + MCTParsable, \ + Pose, \ + PoseSolverFrame, \ + TargetBase +from src.detector.api import \ + CalibrationCalculateResponse, \ + CalibrationImageAddResponse, \ + CalibrationImageGetResponse, \ + CalibrationImageMetadataListResponse, \ + CalibrationResolutionListResponse, \ + CalibrationResultGetResponse, \ + CalibrationResultGetActiveResponse, \ + CalibrationResultMetadataListResponse, \ + CameraImageGetResponse, \ + CameraParametersGetResponse, \ + CameraParametersSetRequest, \ + CameraParametersSetResponse, \ + CameraResolutionGetResponse, \ + DetectorFrameGetResponse, \ + DetectorStartRequest, \ + DetectorStopRequest, \ + MarkerParametersGetResponse, \ + MarkerParametersSetRequest +from src.pose_solver.api import \ + PoseSolverGetPosesResponse, \ + PoseSolverSetTargetsRequest, \ + PoseSolverStartRequest, \ + PoseSolverStopRequest import abc import datetime from enum import StrEnum +from ipaddress import IPv4Address import json from typing import Final import uuid @@ -46,6 +80,24 @@ class State(StrEnum): FAILURE_DISCONNECTING: Final[str] = "Failure - Disconnecting" FAILURE_DEINITIALIZING: Final[str] = "Failure - Deinitializing" + class ComponentAddress: + """ + Information used to establish a connection, + there is nothing that should change here without a user's explicit input. + """ + + def __init__( + self, + label: str, + role: ComponentRoleLabel, + ip_address: IPv4Address, + port: int + ): + self.label = label + self.role = role + self.ip_address = ip_address + self.port = port + class ConnectionResult: success: bool error_message: str @@ -89,8 +141,42 @@ def __init__( self.status = status self.response_series = response_series + class Report: + """ + Human-readable information that shall be shown to a user about a connection. 
+ """ + label: str + role: ComponentRoleLabel + ip_address: str + port: int + status: str + + def __init__( + self, + label: str, + role: ComponentRoleLabel, + ip_address: str, + port: int, + status: str + ): + self.label = label + self.role = role + self.ip_address = ip_address + self.port = port + self.status = status + + def __eq__(self, other): + if not isinstance(other, Connection.Report): + return False + return ( + self.label == other.label and + self.role == other.role and + self.ip_address == other.ip_address and + self.port == other.port and + self.status == other.status) + # treat as immutable - _component_address: MCTComponentAddress + _component_address: ComponentAddress _state: State @@ -115,7 +201,7 @@ def __init__( def __init__( self, - component_address: MCTComponentAddress + component_address: ComponentAddress ): self._component_address = component_address @@ -181,8 +267,8 @@ def get_current_state(self) -> str: def get_label(self) -> str: return self._component_address.label - def get_report(self) -> ConnectionReport: - return ConnectionReport( + def get_report(self) -> Report: + return Connection.Report( label=self._component_address.label, role=self._component_address.role, ip_address=str(self._component_address.ip_address), @@ -477,3 +563,165 @@ def _update_in_reconnecting_state(self) -> None: def _update_in_running_state(self) -> None: self._send_recv() + + +class DetectorConnection(Connection): + + configured_transform_to_reference: Matrix4x4 | None + configured_camera_parameters: list[KeyValueSimpleAny] | None + configured_marker_parameters: list[KeyValueSimpleAny] | None + + # These are variables used directly by the MCTController for storing data + request_id: uuid.UUID | None + current_resolution: ImageResolution | None + current_intrinsic_parameters: IntrinsicParameters | None + latest_frame: DetectorFrame | None + recording: list[DetectorFrame] | None + + def __init__( + self, + component_address: Connection.ComponentAddress + ): + super().__init__(component_address=component_address) + + self.configured_transform_to_reference = None + self.configured_camera_parameters = None + self.configured_marker_parameters = None + + self.request_id = None + self.current_resolution = None + self.current_intrinsic_parameters = None + self.latest_frame = None + self.recording = [] + + def create_deinitialization_request_series(self) -> MCTRequestSeries: + return MCTRequestSeries(series=[DetectorStopRequest()]) + + def create_initialization_request_series(self) -> MCTRequestSeries: + series: list[MCTRequest] = [DetectorStartRequest()] + if self.configured_camera_parameters is not None: + series.append(CameraParametersSetRequest(parameters=self.configured_camera_parameters)) + if self.configured_marker_parameters is not None: + series.append(MarkerParametersSetRequest(parameters=self.configured_marker_parameters)) + return MCTRequestSeries(series=series) + + def handle_deinitialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.DeinitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity="warning", + message=f"Expected exactly one response to deinitialization requests. 
Got {response_count}.") + elif not isinstance(response_series.series[0], (EmptyResponse, CameraParametersSetResponse)): + self.enqueue_status_message( + severity="warning", + message=f"The deinitialization response was not of the expected type EmptyResponse.") + return Connection.DeinitializationResult.SUCCESS + + def handle_initialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.InitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity="warning", + message=f"Expected exactly one response to initialization requests. Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity="warning", + message=f"The initialization response was not of the expected type EmptyResponse.") + return Connection.InitializationResult.SUCCESS + + def supported_response_types(self) -> list[type[MCTResponse]]: + return super().supported_response_types() + [ + CalibrationCalculateResponse, + CalibrationImageAddResponse, + CalibrationImageGetResponse, + CalibrationImageMetadataListResponse, + CalibrationResolutionListResponse, + CalibrationResultGetResponse, + CalibrationResultGetActiveResponse, + CalibrationResultMetadataListResponse, + CameraImageGetResponse, + CameraParametersGetResponse, + CameraParametersSetResponse, + CameraResolutionGetResponse, + DetectorFrameGetResponse, + MarkerParametersGetResponse] + + +class PoseSolverConnection(Connection): + + # These are variables used directly by the MCTController for storing data + + configured_solver_parameters: list[KeyValueSimpleAny] | None + configured_targets: list[TargetBase] | None + + request_id: uuid.UUID | None + detector_poses: list[Pose] + target_poses: list[Pose] + detector_timestamps: dict[str, datetime.datetime] # access by detector_label + poses_timestamp: datetime.datetime + recording: list[PoseSolverFrame] | None + + def __init__( + self, + component_address: Connection.ComponentAddress + ): + super().__init__(component_address=component_address) + + self.configured_solver_parameters = None + self.configured_targets = None + + self.request_id = None + self.detector_poses = list() + self.target_poses = list() + self.detector_timestamps = dict() + self.poses_timestamp = datetime.datetime.min + self.recording = [] + + def create_deinitialization_request_series(self) -> MCTRequestSeries: + return MCTRequestSeries(series=[PoseSolverStopRequest()]) + + def create_initialization_request_series(self) -> MCTRequestSeries: + series: list[MCTRequest] = [PoseSolverStartRequest()] + if self.configured_targets is not None: + series.append(PoseSolverSetTargetsRequest(targets=self.configured_targets)) + return MCTRequestSeries(series=series) + + def handle_deinitialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.DeinitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity="warning", + message=f"Expected exactly one response to deinitialization requests. 
Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity="warning", + message=f"The deinitialization response was not of the expected type EmptyResponse.") + return Connection.DeinitializationResult.SUCCESS + + def handle_initialization_response_series( + self, + response_series: MCTResponseSeries + ) -> Connection.InitializationResult: + response_count: int = len(response_series.series) + if response_count != 1: + self.enqueue_status_message( + severity="warning", + message=f"Expected exactly one response to initialization requests. Got {response_count}.") + elif not isinstance(response_series.series[0], EmptyResponse): + self.enqueue_status_message( + severity="warning", + message=f"The initialization response was not of the expected type EmptyResponse.") + return Connection.InitializationResult.SUCCESS + + def supported_response_types(self) -> list[type[MCTResponse]]: + return super().supported_response_types() + [ + PoseSolverGetPosesResponse] diff --git a/src/controller/exceptions/response_series_not_expected.py b/src/controller/exceptions.py similarity index 100% rename from src/controller/exceptions/response_series_not_expected.py rename to src/controller/exceptions.py diff --git a/src/controller/exceptions/__init__.py b/src/controller/exceptions/__init__.py deleted file mode 100644 index d9141c4..0000000 --- a/src/controller/exceptions/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .response_series_not_expected import ResponseSeriesNotExpected diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 1ba0e61..f59e9e0 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -1,13 +1,12 @@ from .exceptions import ResponseSeriesNotExpected -from .structures import \ - ConnectionReport, \ - Connection, \ - DetectorConnection, \ - MCTComponentAddress, \ +from .configuration import \ MCTComponentConfig, \ MCTConfiguration, \ - PoseSolverConnection, \ StartupMode +from .connection import \ + Connection, \ + DetectorConnection, \ + PoseSolverConnection from src.common import \ EmptyResponse, \ ErrorResponse, \ @@ -128,7 +127,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: for detector in configuration.detectors: if not is_valid_ip_address(detector): continue - component_address: MCTComponentAddress = MCTComponentAddress( + component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=detector.label, role="detector", ip_address=detector.ip_address, @@ -143,7 +142,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: for pose_solver in configuration.pose_solvers: if not is_valid_ip_address(pose_solver): continue - component_address: MCTComponentAddress = MCTComponentAddress( + component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=pose_solver.label, role="pose_solver", ip_address=pose_solver.ip_address, @@ -156,7 +155,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: def add_connection( self, - component_address: MCTComponentAddress + component_address: Connection.ComponentAddress ) -> DetectorConnection | PoseSolverConnection: label = component_address.label if label in self._connections: @@ -301,8 +300,8 @@ def get_component_labels( return_value.append(connection_label) return return_value - def get_connection_reports(self) -> list[ConnectionReport]: - return_value: list[ConnectionReport] = list() + def get_connection_reports(self) -> 
list[Connection.Report]: + return_value: list[Connection.Report] = list() for connection in self._connections.values(): return_value.append(connection.get_report()) return return_value diff --git a/src/controller/structures/__init__.py b/src/controller/structures/__init__.py deleted file mode 100644 index 3f7735b..0000000 --- a/src/controller/structures/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .connection import Connection -from .connection_report import ConnectionReport -from .detector_connection import DetectorConnection -from .mct_component_address import MCTComponentAddress -from .mct_configuration import \ - MCTComponentConfig, \ - MCTConfiguration -from .pose_solver_connection import PoseSolverConnection -from .startup_mode import StartupMode diff --git a/src/controller/structures/connection_report.py b/src/controller/structures/connection_report.py deleted file mode 100644 index b4573af..0000000 --- a/src/controller/structures/connection_report.py +++ /dev/null @@ -1,22 +0,0 @@ -from pydantic import BaseModel, Field - - -class ConnectionReport(BaseModel): - """ - Human-readable information that shall be shown to a user about a connection. - """ - label: str = Field() - role: str = Field() - ip_address: str = Field() - port: int = Field() - status: str = Field() - - def __eq__(self, other): - if not isinstance(other, ConnectionReport): - return False - return ( - self.label == other.label and - self.role == other.role and - self.ip_address == other.ip_address and - self.port == other.port and - self.status == other.status) diff --git a/src/controller/structures/detector_connection.py b/src/controller/structures/detector_connection.py deleted file mode 100644 index c15fad1..0000000 --- a/src/controller/structures/detector_connection.py +++ /dev/null @@ -1,122 +0,0 @@ -from .mct_component_address import MCTComponentAddress -from .connection import Connection -from src.common.api import \ - EmptyResponse, \ - MCTRequest, \ - MCTRequestSeries, \ - MCTResponse, \ - MCTResponseSeries -from src.common.structures import \ - DetectorFrame, \ - ImageResolution, \ - IntrinsicParameters, \ - KeyValueSimpleAny, \ - Matrix4x4 -from src.detector.api import \ - CalibrationCalculateResponse, \ - CalibrationImageAddResponse, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListResponse, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListResponse, \ - CameraImageGetResponse, \ - CameraParametersGetResponse, \ - CameraParametersSetRequest, \ - CameraParametersSetResponse, \ - CameraResolutionGetResponse, \ - DetectorFrameGetResponse, \ - DetectorStartRequest, \ - DetectorStopRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest -import uuid - - -class DetectorConnection(Connection): - - configured_transform_to_reference: Matrix4x4 | None - configured_camera_parameters: list[KeyValueSimpleAny] | None - configured_marker_parameters: list[KeyValueSimpleAny] | None - - # These are variables used directly by the MCTController for storing data - request_id: uuid.UUID | None - current_resolution: ImageResolution | None - current_intrinsic_parameters: IntrinsicParameters | None - latest_frame: DetectorFrame | None - recording: list[DetectorFrame] | None - - def __init__( - self, - component_address: MCTComponentAddress - ): - super().__init__(component_address=component_address) - - self.configured_transform_to_reference = None - self.configured_camera_parameters = None - 
self.configured_marker_parameters = None - - self.request_id = None - self.current_resolution = None - self.current_intrinsic_parameters = None - self.latest_frame = None - self.recording = [] - - def create_deinitialization_request_series(self) -> MCTRequestSeries: - return MCTRequestSeries(series=[DetectorStopRequest()]) - - def create_initialization_request_series(self) -> MCTRequestSeries: - series: list[MCTRequest] = [DetectorStartRequest()] - if self.configured_camera_parameters is not None: - series.append(CameraParametersSetRequest(parameters=self.configured_camera_parameters)) - if self.configured_marker_parameters is not None: - series.append(MarkerParametersSetRequest(parameters=self.configured_marker_parameters)) - return MCTRequestSeries(series=series) - - def handle_deinitialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.DeinitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") - elif not isinstance(response_series.series[0], (EmptyResponse, CameraParametersSetResponse)): - self.enqueue_status_message( - severity="warning", - message=f"The deinitialization response was not of the expected type EmptyResponse.") - return Connection.DeinitializationResult.SUCCESS - - def handle_initialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.InitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to initialization requests. Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The initialization response was not of the expected type EmptyResponse.") - return Connection.InitializationResult.SUCCESS - - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ - CalibrationCalculateResponse, - CalibrationImageAddResponse, - CalibrationImageGetResponse, - CalibrationImageMetadataListResponse, - CalibrationResolutionListResponse, - CalibrationResultGetResponse, - CalibrationResultGetActiveResponse, - CalibrationResultMetadataListResponse, - CameraImageGetResponse, - CameraParametersGetResponse, - CameraParametersSetResponse, - CameraResolutionGetResponse, - DetectorFrameGetResponse, - MarkerParametersGetResponse] diff --git a/src/controller/structures/mct_component_address.py b/src/controller/structures/mct_component_address.py deleted file mode 100644 index 090256f..0000000 --- a/src/controller/structures/mct_component_address.py +++ /dev/null @@ -1,14 +0,0 @@ -from src.common.structures.mct_component import ComponentRoleLabel -from ipaddress import IPv4Address -from pydantic import BaseModel, Field - - -class MCTComponentAddress(BaseModel): - """ - Information used to establish a connection, - there is nothing that should change here without a user's explicit input. 
- """ - label: str = Field() - role: ComponentRoleLabel = Field() - ip_address: IPv4Address = Field() - port: int = Field() diff --git a/src/controller/structures/pose_solver_connection.py b/src/controller/structures/pose_solver_connection.py deleted file mode 100644 index 6b69231..0000000 --- a/src/controller/structures/pose_solver_connection.py +++ /dev/null @@ -1,94 +0,0 @@ -from src.common.structures.pose_solver import PoseSolverFrame -from .mct_component_address import MCTComponentAddress -from .connection import Connection -from src.common.api import \ - EmptyResponse, \ - MCTRequest, \ - MCTRequestSeries, \ - MCTResponse, \ - MCTResponseSeries -from src.common.structures import \ - KeyValueSimpleAny, \ - Pose, \ - TargetBase -from src.pose_solver.api import \ - PoseSolverGetPosesResponse, \ - PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, \ - PoseSolverStopRequest -import datetime -import uuid - - -class PoseSolverConnection(Connection): - - # These are variables used directly by the MCTController for storing data - - configured_solver_parameters: list[KeyValueSimpleAny] | None - configured_targets: list[TargetBase] | None - - request_id: uuid.UUID | None - detector_poses: list[Pose] - target_poses: list[Pose] - detector_timestamps: dict[str, datetime.datetime] # access by detector_label - poses_timestamp: datetime.datetime - recording: list[PoseSolverFrame] | None - - def __init__( - self, - component_address: MCTComponentAddress - ): - super().__init__(component_address=component_address) - - self.configured_solver_parameters = None - self.configured_targets = None - - self.request_id = None - self.detector_poses = list() - self.target_poses = list() - self.detector_timestamps = dict() - self.poses_timestamp = datetime.datetime.min - self.recording = [] - - def create_deinitialization_request_series(self) -> MCTRequestSeries: - return MCTRequestSeries(series=[PoseSolverStopRequest()]) - - def create_initialization_request_series(self) -> MCTRequestSeries: - series: list[MCTRequest] = [PoseSolverStartRequest()] - if self.configured_targets is not None: - series.append(PoseSolverSetTargetsRequest(targets=self.configured_targets)) - return MCTRequestSeries(series=series) - - def handle_deinitialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.DeinitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The deinitialization response was not of the expected type EmptyResponse.") - return Connection.DeinitializationResult.SUCCESS - - def handle_initialization_response_series( - self, - response_series: MCTResponseSeries - ) -> Connection.InitializationResult: - response_count: int = len(response_series.series) - if response_count != 1: - self.enqueue_status_message( - severity="warning", - message=f"Expected exactly one response to initialization requests. 
Got {response_count}.") - elif not isinstance(response_series.series[0], EmptyResponse): - self.enqueue_status_message( - severity="warning", - message=f"The initialization response was not of the expected type EmptyResponse.") - return Connection.InitializationResult.SUCCESS - - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ - PoseSolverGetPosesResponse] diff --git a/src/controller/structures/startup_mode.py b/src/controller/structures/startup_mode.py deleted file mode 100644 index 9c085fe..0000000 --- a/src/controller/structures/startup_mode.py +++ /dev/null @@ -1,7 +0,0 @@ -from enum import StrEnum -from typing import Final - - -class StartupMode(StrEnum): - DETECTING_ONLY: Final[str] = "detecting_only" - DETECTING_AND_SOLVING: Final[str] = "detecting_and_solving" diff --git a/src/detector/structures/status.py b/src/detector/structures/status.py index 760712f..91a98cc 100644 --- a/src/detector/structures/status.py +++ b/src/detector/structures/status.py @@ -7,9 +7,6 @@ class CameraStatus(StrEnum): RUNNING: Final[int] = "RUNNING" FAILURE: Final[int] = "FAILURE" - def in_runnable_state(self): - return self == CameraStatus.RUNNING - class MarkerStatus(StrEnum): STOPPED: Final[int] = "STOPPED" diff --git a/src/gui/panels/controller_panel.py b/src/gui/panels/controller_panel.py index f172b8f..716a29a 100644 --- a/src/gui/panels/controller_panel.py +++ b/src/gui/panels/controller_panel.py @@ -7,8 +7,8 @@ StatusMessage, \ StatusMessageSource from src.controller import \ - MCTController, \ - ConnectionReport + Connection, \ + MCTController from typing import Final import wx import wx.grid @@ -27,7 +27,7 @@ class ControllerPanel(BasePanel): _log_panel: LogPanel _controller_status: str # last status reported by MCTController - _connection_reports: list[ConnectionReport] + _connection_reports: list[Connection.Report] _is_updating: bool # Some things should only trigger during explicit user events def __init__( @@ -174,7 +174,7 @@ def update_controller_buttons(self): def update_connection_table_display(self) -> None: # Return if there is no change - connection_reports: list[ConnectionReport] = self._controller.get_connection_reports() + connection_reports: list[Connection.Report] = self._controller.get_connection_reports() if len(connection_reports) == len(self._connection_reports): identical: bool = True for connection_report in connection_reports: diff --git a/src/gui/panels/specialized/connection_table.py b/src/gui/panels/specialized/connection_table.py index ff13e86..7676df5 100644 --- a/src/gui/panels/specialized/connection_table.py +++ b/src/gui/panels/specialized/connection_table.py @@ -1,6 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.controller import \ - ConnectionReport +from src.controller import Connection from typing import Final import wx @@ -14,7 +13,7 @@ _COL_LABELS: Final[list[str]] = ["Label", "Role", "IP Address", "Port", "Status"] -class ConnectionTable(RowSelectionTable[ConnectionReport]): +class ConnectionTable(RowSelectionTable[Connection.Report]): def __init__( self, parent: wx.Window, @@ -28,7 +27,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: ConnectionReport + row_content: Connection.Report ): self.table.SetCellValue( row=row_index, diff --git a/src/slicer_connection.py b/src/slicer_connection.py index ca7901a..25caa39 100644 --- a/src/slicer_connection.py +++ b/src/slicer_connection.py @@ -6,18 +6,17 @@ import time as t import logging -from 
src.common.api.mct_request_series import MCTRequestSeries +from src.common.api import MCTRequestSeries from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER from src.common.structures import TargetBase from src.controller.mct_controller import MCTController -from src.controller.structures.mct_component_address import MCTComponentAddress from ipaddress import IPv4Address from src.pose_solver.api import PoseSolverAddTargetMarkerRequest from src.pose_solver.api import TargetMarker from src.pose_solver.api import PoseSolverSetReferenceRequest -from src.controller.structures.connection import Connection +from src.controller import Connection # Input filepath is specified by command line arguments if len(sys.argv) < 2: diff --git a/src/util/measure_detector_to_reference.py b/src/util/measure_detector_to_reference.py index fac284b..f24dd3d 100644 --- a/src/util/measure_detector_to_reference.py +++ b/src/util/measure_detector_to_reference.py @@ -12,9 +12,8 @@ from src.board_builder.board_builder import BoardBuilder from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER -from src.controller.mct_controller import MCTController -from src.controller.structures.mct_component_address import MCTComponentAddress -from src.pose_solver.util import average_quaternion, average_vector +from src.controller import Connection, MCTController +from src.common import MathUtils # input_filepath = "/home/adminpi5/Documents/MCSTrack/data/measure_detector_to_reference_config.json" if len(sys.argv) < 2: @@ -41,12 +40,11 @@ async def main(): ITERATIONS = 20 for detector in detectors: - controller.add_connection(MCTComponentAddress( - label=detector['label'], - role=COMPONENT_ROLE_LABEL_DETECTOR, - ip_address=detector['ip_address'], - port=detector['port'] - )) + controller.add_connection(Connection.ComponentAddress( + label=detector['label'], + role=COMPONENT_ROLE_LABEL_DETECTOR, + ip_address=detector['ip_address'], + port=detector['port'])) all_measured_transforms_by_detector[detector['label']] = [] controller.start_up() @@ -92,8 +90,8 @@ async def main(): quaternions.append(quaternion) translations.append(translation) - avg_quaternion = average_quaternion(quaternions) - avg_translation = average_vector(translations) + avg_quaternion = MathUtils.average_quaternion(quaternions) + avg_translation = MathUtils.average_vector(translations) avg_rotation_matrix = R.from_quat(avg_quaternion).as_matrix() From df7e4a94b221de22d435d7720bc1579680390911 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 10 Jul 2025 14:23:20 -0400 Subject: [PATCH 06/33] WIP: Further consolidation, globally. 
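As a quick sketch of the consolidated surface this patch leaves behind: ComponentAddress and Report are now nested under Connection, and Connection, MCTController, the configuration classes and the exceptions are all importable directly from src.controller. The label, address and port below are placeholders; only add_connection, start_up and get_connection_reports are taken from the diffs above.

# Illustrative sketch with placeholder values, not project code.
from ipaddress import IPv4Address

from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR
from src.controller import Connection, MCTController


def register_detector(controller: MCTController) -> None:
    controller.add_connection(Connection.ComponentAddress(
        label="d101",                             # placeholder
        role=COMPONENT_ROLE_LABEL_DETECTOR,
        ip_address=IPv4Address("192.168.0.101"),  # placeholder
        port=8001))                               # placeholder
    controller.start_up()
    for report in controller.get_connection_reports():  # list[Connection.Report]
        print(f"{report.label} ({report.role}): {report.status}")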
"Marker" has been renamed to "Annotator" --- src/common/__init__.py | 6 + .../annotator.py} | 47 +- .../abstract_camera.py => common/camera.py} | 48 +- src/common/exceptions.py | 6 +- src/common/structures/detector.py | 2 +- src/controller/connection.py | 8 +- src/detector/__init__.py | 43 +- src/detector/api.py | 88 ++- src/detector/detector.py | 112 ++-- src/detector/detector_app.py | 47 +- src/detector/exceptions.py | 9 - .../implementations/marker_aruco_opencv.py | 150 ----- src/detector/interfaces/__init__.py | 2 - src/detector/intrinsic_calibrator.py | 219 +++++-- src/detector/structures/__init__.py | 16 - src/detector/structures/calibration_map.py | 73 --- .../structures/detector_configuration.py | 24 - src/detector/structures/status.py | 14 - src/detector/util.py | 467 -------------- src/gui/panels/calibrator_panel.py | 32 +- src/gui/panels/detector_panel.py | 16 +- .../specialized/calibration_image_table.py | 6 +- .../specialized/calibration_result_table.py | 6 +- .../implementations/__init__.py | 0 src/implementations/annotator_aruco_opencv.py | 586 ++++++++++++++++++ .../camera_opencv_capture_device.py | 27 +- .../implementations/camera_picamera2.py | 20 +- test/test_extrinsic_calibration.py | 13 +- 28 files changed, 1043 insertions(+), 1044 deletions(-) rename src/{detector/interfaces/abstract_marker.py => common/annotator.py} (60%) rename src/{detector/interfaces/abstract_camera.py => common/camera.py} (69%) delete mode 100644 src/detector/exceptions.py delete mode 100644 src/detector/implementations/marker_aruco_opencv.py delete mode 100644 src/detector/interfaces/__init__.py delete mode 100644 src/detector/structures/__init__.py delete mode 100644 src/detector/structures/calibration_map.py delete mode 100644 src/detector/structures/detector_configuration.py delete mode 100644 src/detector/structures/status.py delete mode 100644 src/detector/util.py rename src/{detector => }/implementations/__init__.py (100%) create mode 100644 src/implementations/annotator_aruco_opencv.py rename src/{detector => }/implementations/camera_opencv_capture_device.py (94%) rename src/{detector => }/implementations/camera_picamera2.py (96%) diff --git a/src/common/__init__.py b/src/common/__init__.py index 565d53e..1d82bfd 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -1,3 +1,6 @@ +from .annotator import \ + Annotator, \ + MCTAnnotatorRuntimeError from .api import \ DequeueStatusMessagesRequest, \ DequeueStatusMessagesResponse, \ @@ -11,6 +14,9 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest +from .camera import \ + Camera, \ + MCTCameraRuntimeError from .exceptions import \ MCTError, \ MCTParsingError diff --git a/src/detector/interfaces/abstract_marker.py b/src/common/annotator.py similarity index 60% rename from src/detector/interfaces/abstract_marker.py rename to src/common/annotator.py index fd813f7..a24d8dd 100644 --- a/src/detector/interfaces/abstract_marker.py +++ b/src/common/annotator.py @@ -1,30 +1,53 @@ -from ..structures import \ - MarkerConfiguration, \ - MarkerStatus -from src.common import \ +from .exceptions import \ + MCTError +from .status_messages import \ SeverityLabel, \ StatusMessageSource -from src.common.structures import \ +from .structures import \ MarkerSnapshot, \ KeyValueMetaAny, \ KeyValueSimpleAny import abc import datetime +from enum import StrEnum import numpy +from pydantic import BaseModel, Field +from typing import Final -class AbstractMarker(abc.ABC): +class _Configuration(BaseModel): + method: str = Field() + + 
+class _Status(StrEnum): + STOPPED: Final[int] = "STOPPED" + RUNNING: Final[int] = "RUNNING" + FAILURE: Final[int] = "FAILURE" + + +class MCTAnnotatorRuntimeError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class Annotator(abc.ABC): """ - Functions may raise MCTDetectorRuntimeError + Functions may raise MCTMarkerRuntimeError """ - _configuration: MarkerConfiguration - _status: MarkerStatus + Configuration: type[_Configuration] = _Configuration + Status: type[_Status] = _Status + + _configuration: Configuration + _status: Status _status_message_source: StatusMessageSource def __init__( self, - configuration: MarkerConfiguration, + configuration: Configuration, status_message_source: StatusMessageSource ): self._configuration = configuration @@ -37,10 +60,10 @@ def add_status_message( ) -> None: self._status_message_source.enqueue_status_message(severity=severity, message=message) - def get_status(self) -> MarkerStatus: + def get_status(self) -> Status: return self._status - def set_status(self, status: MarkerStatus) -> None: + def set_status(self, status: Status) -> None: self._status = status @abc.abstractmethod diff --git a/src/detector/interfaces/abstract_camera.py b/src/common/camera.py similarity index 69% rename from src/detector/interfaces/abstract_camera.py rename to src/common/camera.py index 6bb9c1b..f625447 100644 --- a/src/detector/interfaces/abstract_camera.py +++ b/src/common/camera.py @@ -1,10 +1,9 @@ -from ..structures import \ - CameraConfiguration, \ - CameraStatus -from src.common import \ +from .exceptions import \ + MCTError +from .status_messages import \ SeverityLabel, \ StatusMessageSource -from src.common.structures import \ +from .structures import \ CaptureFormat, \ ImageResolution, \ KeyValueSimpleAny, \ @@ -13,21 +12,46 @@ import base64 import cv2 import datetime +from enum import StrEnum import numpy +from pydantic import BaseModel, Field +from typing import Final, Union -class AbstractCamera(abc.ABC): +class _Configuration(BaseModel): + driver: str = Field() + capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) + + +class _Status(StrEnum): + STOPPED: Final[int] = "STOPPED" + RUNNING: Final[int] = "RUNNING" + FAILURE: Final[int] = "FAILURE" + + +class MCTCameraRuntimeError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class Camera(abc.ABC): """ - Functions may raise MCTDetectorRuntimeError + Functions may raise MCTCameraRuntimeError """ - _configuration: CameraConfiguration - _status: CameraStatus + Status: type[_Status] = _Status + Configuration: type[_Configuration] = _Configuration + + _configuration: Configuration + _status: Status _status_message_source: StatusMessageSource def __init__( self, - configuration: CameraConfiguration, + configuration: Configuration, status_message_source: StatusMessageSource ): self._configuration = configuration @@ -59,10 +83,10 @@ def get_encoded_image( encoded_image_rgb_base64: str = base64.b64encode(encoded_image_rgb_bytes) return encoded_image_rgb_base64 - def get_status(self) -> CameraStatus: + def get_status(self) -> Status: return self._status - def set_status(self, status: CameraStatus) -> None: + def set_status(self, status: Status) -> None: self._status = status @abc.abstractmethod diff --git a/src/common/exceptions.py b/src/common/exceptions.py index dd46d7a..8fc5103 100644 --- a/src/common/exceptions.py +++ 
b/src/common/exceptions.py @@ -6,10 +6,6 @@ def __init__(self, *args): class MCTParsingError(MCTError): message: str - def __init__( - self, - message: str, - *args - ): + def __init__(self, message: str, *args): super().__init__(args) self.message = message diff --git a/src/common/structures/detector.py b/src/common/structures/detector.py index adb5ade..e19ae7b 100644 --- a/src/common/structures/detector.py +++ b/src/common/structures/detector.py @@ -17,7 +17,7 @@ class MarkerCornerImagePoint(BaseModel): class MarkerSnapshot(BaseModel): - label: str = Field() + label: str = Field() # Empty indicates that something was detected but not identified corner_image_points: list[MarkerCornerImagePoint] = Field() diff --git a/src/controller/connection.py b/src/controller/connection.py index d9e5e9d..95a5de5 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -37,8 +37,8 @@ DetectorFrameGetResponse, \ DetectorStartRequest, \ DetectorStopRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest from src.pose_solver.api import \ PoseSolverGetPosesResponse, \ PoseSolverSetTargetsRequest, \ @@ -602,7 +602,7 @@ def create_initialization_request_series(self) -> MCTRequestSeries: if self.configured_camera_parameters is not None: series.append(CameraParametersSetRequest(parameters=self.configured_camera_parameters)) if self.configured_marker_parameters is not None: - series.append(MarkerParametersSetRequest(parameters=self.configured_marker_parameters)) + series.append(AnnotatorParametersSetRequest(parameters=self.configured_marker_parameters)) return MCTRequestSeries(series=series) def handle_deinitialization_response_series( @@ -650,7 +650,7 @@ def supported_response_types(self) -> list[type[MCTResponse]]: CameraParametersSetResponse, CameraResolutionGetResponse, DetectorFrameGetResponse, - MarkerParametersGetResponse] + AnnotatorParametersGetResponse] class PoseSolverConnection(Connection): diff --git a/src/detector/__init__.py b/src/detector/__init__.py index bc81790..de7443b 100644 --- a/src/detector/__init__.py +++ b/src/detector/__init__.py @@ -1,6 +1,39 @@ +from .api import \ + CalibrationCalculateRequest, \ + CalibrationCalculateResponse, \ + CalibrationDeleteStagedRequest, \ + CalibrationImageAddRequest, \ + CalibrationImageAddResponse, \ + CalibrationImageGetRequest, \ + CalibrationImageGetResponse, \ + CalibrationImageMetadataListRequest, \ + CalibrationImageMetadataListResponse, \ + CalibrationImageMetadataUpdateRequest, \ + CalibrationResolutionListRequest, \ + CalibrationResolutionListResponse, \ + CalibrationResultGetRequest, \ + CalibrationResultGetResponse, \ + CalibrationResultGetActiveRequest, \ + CalibrationResultGetActiveResponse, \ + CalibrationResultMetadataListRequest, \ + CalibrationResultMetadataListResponse, \ + CalibrationResultMetadataUpdateRequest, \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + CameraParametersGetRequest, \ + CameraParametersGetResponse, \ + CameraParametersSetRequest, \ + CameraParametersSetResponse, \ + CameraResolutionGetRequest, \ + CameraResolutionGetResponse, \ + DetectorFrameGetRequest, \ + DetectorFrameGetResponse, \ + DetectorStartRequest, \ + DetectorStopRequest, \ + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest from .intrinsic_calibrator import IntrinsicCalibrator -from .detector import Detector -from .interfaces import \ - AbstractCamera, \ - AbstractMarker -from 
.structures import DetectorConfiguration +from .detector import \ + Detector, \ + DetectorConfiguration diff --git a/src/detector/api.py b/src/detector/api.py index f56cea4..1b4190d 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -8,15 +8,48 @@ ImageResolution, \ KeyValueMetaAny, \ KeyValueSimpleAny -from .structures import \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationResultMetadata, \ - CalibrationResultState +from .intrinsic_calibrator import IntrinsicCalibrator from pydantic import Field, SerializeAsAny from typing import Final, Literal +class AnnotatorParametersGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" + + @staticmethod + def parsable_type_identifier() -> str: + return AnnotatorParametersGetRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class AnnotatorParametersGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" + + @staticmethod + def parsable_type_identifier() -> str: + return AnnotatorParametersGetResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() + + +class AnnotatorParametersSetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_set" + + @staticmethod + def parsable_type_identifier() -> str: + return AnnotatorParametersSetRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() + + class CalibrationCalculateRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" @@ -128,7 +161,7 @@ def parsable_type_identifier() -> str: # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) + metadata_list: list[IntrinsicCalibrator.ImageMetadata] = Field(default_factory=list) class CalibrationImageMetadataUpdateRequest(MCTRequest): @@ -142,7 +175,7 @@ def parsable_type_identifier() -> str: parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) image_identifier: str = Field() - image_state: CalibrationImageState = Field() + image_state: IntrinsicCalibrator.ImageState = Field() image_label: str | None = Field(default=None) @@ -243,7 +276,7 @@ def parsable_type_identifier() -> str: # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) + metadata_list: list[IntrinsicCalibrator.ResultMetadata] = Field(default_factory=list) class CalibrationResultMetadataUpdateRequest(MCTRequest): @@ -257,7 +290,7 @@ def parsable_type_identifier() -> str: parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) result_identifier: str = Field() - result_state: CalibrationResultState = Field() + result_state: IntrinsicCalibrator.ResultState = Field() result_label: str | None = Field(default=None) @@ -410,40 +443,3 @@ def parsable_type_identifier() -> str: # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class MarkerParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" - - 
@staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersGetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class MarkerParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" - - @staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() - - -class MarkerParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_set" - - @staticmethod - def parsable_type_identifier() -> str: - return MarkerParametersSetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() diff --git a/src/detector/detector.py b/src/detector/detector.py index 9ee6ebb..ef4ddc6 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -1,4 +1,7 @@ from .api import \ + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest, \ CalibrationCalculateRequest, \ CalibrationCalculateResponse, \ CalibrationDeleteStagedRequest, \ @@ -29,26 +32,18 @@ DetectorFrameGetRequest, \ DetectorFrameGetResponse, \ DetectorStartRequest, \ - DetectorStopRequest, \ - MarkerParametersGetRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest -from .intrinsic_calibrator import IntrinsicCalibrator -from .exceptions import \ - MCTDetectorRuntimeError -from .interfaces import \ - AbstractMarker, \ - AbstractCamera -from .structures import \ - CalibrationImageMetadata, \ - CalibrationResultMetadata, \ - CameraStatus, \ - DetectorConfiguration, \ - MarkerStatus + DetectorStopRequest +from .intrinsic_calibrator import \ + IntrinsicCalibrator, \ + MCTIntrinsicCalibrationError from src.common import \ + Annotator, \ + Camera, \ EmptyResponse, \ ErrorResponse, \ + MCTCameraRuntimeError, \ MCTComponent, \ + MCTAnnotatorRuntimeError, \ MCTRequest, \ MCTResponse, \ PythonUtils @@ -62,26 +57,36 @@ KeyValueSimpleAny import logging from typing import Callable +from pydantic import BaseModel, Field logger = logging.getLogger(__name__) +class DetectorConfiguration(BaseModel): + """ + Top-level schema for Detector initialization data + """ + calibrator_configuration: IntrinsicCalibrator.Configuration = Field() + camera_configuration: Camera.Configuration = Field() + marker_configuration: Annotator.Configuration = Field() + + class Detector(MCTComponent): _detector_configuration: DetectorConfiguration _calibrator: IntrinsicCalibrator - _camera: AbstractCamera - _marker: AbstractMarker + _camera: Camera + _marker: Annotator _frame_count: int def __init__( self, detector_configuration: DetectorConfiguration, - camera_type: type[AbstractCamera], - marker_type: type[AbstractMarker] + camera_type: type[Camera], + marker_type: type[Annotator] ): super().__init__( status_source_label="detector", @@ -115,7 +120,7 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro result_identifier, intrinsic_calibration = self._calibrator.calculate( image_resolution=request.image_resolution, marker_parameters=marker_parameters_kvs) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return 
ErrorResponse(message=e.message) return CalibrationCalculateResponse( result_identifier=result_identifier, @@ -124,7 +129,7 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: try: self._calibrator.delete_staged() - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return EmptyResponse() @@ -132,7 +137,7 @@ def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | Erro try: image_base64: str = self._camera.get_encoded_image(image_format=".png", requested_resolution=None) image_identifier: str = self._calibrator.add_image(image_base64=image_base64) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationImageAddResponse(image_identifier=image_identifier) @@ -144,7 +149,7 @@ def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | Error image_base64: str try: image_base64 = self._calibrator.get_image(image_identifier=request.image_identifier) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationImageGetResponse(image_base64=image_base64) @@ -153,11 +158,11 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL kwargs=kwargs, key="request", arg_type=CalibrationImageMetadataListRequest) - image_metadata_list: list[CalibrationImageMetadata] + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] try: image_metadata_list = self._calibrator.list_image_metadata( image_resolution=request.image_resolution) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) @@ -171,7 +176,7 @@ def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorRe image_identifier=request.image_identifier, image_state=request.image_state, image_label=request.image_label) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return EmptyResponse() @@ -179,7 +184,7 @@ def calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListRes resolutions: list[ImageResolution] try: resolutions = self._calibrator.list_resolutions() - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationResolutionListResponse(resolutions=resolutions) @@ -191,7 +196,7 @@ def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | Err intrinsic_calibration: IntrinsicCalibration try: intrinsic_calibration = self._calibrator.get_result(result_identifier=request.result_identifier) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) @@ -200,7 +205,7 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive try: image_resolution: ImageResolution = self._camera.get_resolution() intrinsic_calibration = self._calibrator.get_result_active(image_resolution=image_resolution) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return 
CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) @@ -209,11 +214,11 @@ def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadat kwargs=kwargs, key="request", arg_type=CalibrationResultMetadataListRequest) - result_metadata_list: list[CalibrationResultMetadata] + result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] try: result_metadata_list = self._calibrator.list_result_metadata( image_resolution=request.image_resolution) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) @@ -227,7 +232,7 @@ def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorR result_identifier=request.result_identifier, result_state=request.result_state, result_label=request.result_label) - except MCTDetectorRuntimeError as e: + except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return EmptyResponse() @@ -241,7 +246,7 @@ def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: encoded_image_base64 = self._camera.get_encoded_image( image_format=request.format, requested_resolution=request.requested_resolution) - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraImageGetResponse( format=request.format, @@ -251,7 +256,7 @@ def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | Erro parameters: list[KeyValueMetaAbstract] try: parameters = self._camera.get_parameters() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraParametersGetResponse(parameters=parameters) @@ -264,7 +269,7 @@ def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | Error try: self._camera.set_parameters(parameters=request.parameters) new_resolution = self._camera.get_resolution() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraParametersSetResponse(resolution=new_resolution) @@ -272,7 +277,7 @@ def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | Erro image_resolution: ImageResolution try: image_resolution = self._camera.get_resolution() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return CameraResolutionGetResponse(resolution=image_resolution) @@ -292,39 +297,39 @@ def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorRespon detector_frame.detected_marker_snapshots = self._marker.get_markers_detected() if request.include_rejected: detector_frame.rejected_marker_snapshots = self._marker.get_markers_rejected() - except MCTDetectorRuntimeError as e: + except (MCTCameraRuntimeError, MCTAnnotatorRuntimeError) as e: return ErrorResponse(message=e.message) return DetectorFrameGetResponse(frame=detector_frame) def detector_start(self, **_kwargs) -> EmptyResponse | ErrorResponse: try: self._camera.start() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() def detector_stop(self, **_kwargs) -> EmptyResponse | ErrorResponse: try: self._camera.stop() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() - def 
marker_parameters_get(self, **_kwargs) -> MarkerParametersGetResponse | ErrorResponse: + def marker_parameters_get(self, **_kwargs) -> AnnotatorParametersGetResponse | ErrorResponse: try: parameters = self._marker.get_parameters() - except MCTDetectorRuntimeError as e: + except MCTAnnotatorRuntimeError as e: return ErrorResponse(message=e.message) - return MarkerParametersGetResponse(parameters=parameters) + return AnnotatorParametersGetResponse(parameters=parameters) def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: MarkerParametersSetRequest = PythonUtils.get_kwarg( + request: AnnotatorParametersSetRequest = PythonUtils.get_kwarg( kwargs=kwargs, key="request", - arg_type=MarkerParametersSetRequest) + arg_type=AnnotatorParametersSetRequest) try: self._marker.set_parameters(parameters=request.parameters) - except MCTDetectorRuntimeError as e: + except MCTAnnotatorRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() @@ -349,22 +354,25 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT CameraParametersGetRequest: self.camera_parameters_get, CameraParametersSetRequest: self.camera_parameters_set, CameraResolutionGetRequest: self.camera_resolution_get, - MarkerParametersGetRequest: self.marker_parameters_get, - MarkerParametersSetRequest: self.marker_parameters_set}) + AnnotatorParametersGetRequest: self.marker_parameters_get, + AnnotatorParametersSetRequest: self.marker_parameters_set}) return return_value async def update(self): if self.time_sync_active: return - if self._camera.get_status() == CameraStatus.RUNNING: + if self._camera.get_status() == Camera.Status.RUNNING: try: self._camera.update() - except MCTDetectorRuntimeError as e: + except MCTCameraRuntimeError as e: self.add_status_message(severity="error", message=e.message) - if self._marker.get_status() == MarkerStatus.RUNNING and \ + if self._marker.get_status() == Annotator.Status.RUNNING and \ self._camera.get_changed_timestamp() > self._marker.get_changed_timestamp(): - self._marker.update(self._camera.get_image()) + try: + self._marker.update(self._camera.get_image()) + except MCTAnnotatorRuntimeError as e: + self.add_status_message(severity="error", message=e.message) self._frame_count += 1 if self._frame_count % 1000 == 0: print(f"Update count: {self._frame_count}") diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index 088cd2e..ed21d7e 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -1,4 +1,19 @@ +from .api import \ + CalibrationResultGetActiveResponse, \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + CameraParametersGetResponse, \ + CameraResolutionGetResponse, \ + DetectorFrameGetRequest, \ + DetectorFrameGetResponse, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest +from .detector import \ + Detector, \ + DetectorConfiguration from src.common import \ + Camera, \ + Annotator, \ EmptyResponse, \ ErrorResponse, \ NetworkUtils, \ @@ -6,22 +21,6 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.detector import \ - Detector, \ - DetectorConfiguration -from src.detector.api import \ - CalibrationResultGetActiveResponse, \ - CameraImageGetRequest, \ - CameraImageGetResponse, \ - CameraParametersGetResponse, \ - CameraResolutionGetResponse, \ - DetectorFrameGetRequest, \ - DetectorFrameGetResponse, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest -from src.detector.interfaces import \ - 
AbstractCamera, \ - AbstractMarker import asyncio import base64 from fastapi import FastAPI, Request @@ -47,20 +46,20 @@ def create_app() -> FastAPI: # Eventually it would be preferable to put the initialization logic/mapping below into an abstract factory, # and allow end-users to register custom classes that are not necessarily shipped within this library. - camera_type: type[AbstractCamera] + camera_type: type[Camera] if detector_configuration.camera_configuration.driver == "opencv_capture_device": - from src.detector.implementations.camera_opencv_capture_device import OpenCVCaptureDeviceCamera + from src.implementations.camera_opencv_capture_device import OpenCVCaptureDeviceCamera camera_type = OpenCVCaptureDeviceCamera elif detector_configuration.camera_configuration.driver == "picamera2": - from src.detector.implementations.camera_picamera2 import Picamera2Camera + from src.implementations.camera_picamera2 import Picamera2Camera camera_type = Picamera2Camera else: raise RuntimeError(f"Unsupported camera driver {detector_configuration.camera_configuration.driver}.") - marker_type: type[AbstractMarker] + marker_type: type[Annotator] if detector_configuration.marker_configuration.method == "aruco_opencv": - from src.detector.implementations.marker_aruco_opencv import ArucoOpenCVMarker - marker_type = ArucoOpenCVMarker + from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator + marker_type = ArucoOpenCVAnnotator else: raise RuntimeError(f"Unsupported marker method {detector_configuration.marker_configuration.method}.") @@ -143,12 +142,12 @@ async def camera_get_resolution() -> CameraResolutionGetResponse: return detector.camera_resolution_get() @detector_app.get("/marker/get_parameters") - async def marker_get_parameters() -> MarkerParametersGetResponse | ErrorResponse: + async def marker_get_parameters() -> AnnotatorParametersGetResponse | ErrorResponse: return detector.marker_parameters_get() @detector_app.post("/marker/set_parameters") async def marker_set_parameters( - request: MarkerParametersSetRequest + request: AnnotatorParametersSetRequest ) -> EmptyResponse | ErrorResponse: return detector.marker_parameters_set( request=request) diff --git a/src/detector/exceptions.py b/src/detector/exceptions.py deleted file mode 100644 index 06a2c84..0000000 --- a/src/detector/exceptions.py +++ /dev/null @@ -1,9 +0,0 @@ -from src.common.exceptions import MCTError - - -class MCTDetectorRuntimeError(MCTError): - message: str - - def __init__(self, *args, message: str): - super().__init__(*args) - self.message = message diff --git a/src/detector/implementations/marker_aruco_opencv.py b/src/detector/implementations/marker_aruco_opencv.py deleted file mode 100644 index c08d5ca..0000000 --- a/src/detector/implementations/marker_aruco_opencv.py +++ /dev/null @@ -1,150 +0,0 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractMarker -from ..structures import \ - MarkerConfiguration, \ - MarkerStatus -from ..util import \ - assign_aruco_detection_parameters_to_key_value_list, \ - assign_key_value_list_to_aruco_detection_parameters -from src.common import StatusMessageSource -from src.common.structures import \ - KeyValueMetaAny, \ - KeyValueSimpleAny, \ - MarkerCornerImagePoint, \ - MarkerSnapshot -import cv2.aruco -import datetime -import logging -import numpy -from typing import Any - - -logger = logging.getLogger(__name__) - - -# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html -# for documentation on individual 
parameters - - -class ArucoOpenCVMarker(AbstractMarker): - - _marker_dictionary: Any | None # created by OpenCV, type cv2.aruco.Dictionary - _marker_parameters: Any # created by OpenCV, type cv2.aruco.DetectorParameters - _marker_label_reverse_dictionary: dict[int, str] - _marker_detected_snapshots: list[MarkerSnapshot] - _marker_rejected_snapshots: list[MarkerSnapshot] - _marker_timestamp_utc: datetime.datetime - - def __init__( - self, - configuration: MarkerConfiguration, - status_message_source: StatusMessageSource - ): - super().__init__( - configuration=configuration, - status_message_source=status_message_source) - - self._marker_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - self._marker_parameters = cv2.aruco.DetectorParameters() - self._marker_label_reverse_dictionary = dict() - self._marker_detected_snapshots = list() # Markers that are determined to be valid, and are identified - self._marker_rejected_snapshots = list() # Things that looked at first like markers but got later filtered out - self._marker_timestamp_utc = datetime.datetime.min - self.set_status(MarkerStatus.RUNNING) # Always running - - def get_changed_timestamp(self) -> datetime.datetime: - return self._marker_timestamp_utc - - def get_markers_detected(self) -> list[MarkerSnapshot]: - return self._marker_detected_snapshots - - def get_markers_rejected(self) -> list[MarkerSnapshot]: - return self._marker_rejected_snapshots - - def get_parameters(self) -> list[KeyValueMetaAny]: - return assign_aruco_detection_parameters_to_key_value_list(self._marker_parameters) - - @staticmethod - def get_type_identifier() -> str: - return "aruco_opencv" - - @staticmethod - def _marker_corner_image_point_list_from_embedded_list( - corner_image_points_px: list[list[float]] - ) -> list[MarkerCornerImagePoint]: - corner_image_point_list: list[MarkerCornerImagePoint] = list() - assert len(corner_image_points_px) == 4 - for corner_image_point_px in corner_image_points_px: - corner_image_point_list.append(MarkerCornerImagePoint( - x_px=corner_image_point_px[0], - y_px=corner_image_point_px[1])) - return corner_image_point_list - - # noinspection DuplicatedCode - def set_parameters( - self, - parameters: list[KeyValueSimpleAny] - ) -> None: - mismatched_keys: list[str] = assign_key_value_list_to_aruco_detection_parameters( - detection_parameters=self._marker_parameters, - key_value_list=parameters) - if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( - message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - - def update( - self, - image: numpy.ndarray - ) -> None: - if self._marker_dictionary is None: - message: str = "No marker dictionary has been set." - self.add_status_message(severity="error", message=message) - self.set_status(MarkerStatus.FAILURE) - return - - image_greyscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( - image=image_greyscale, - dictionary=self._marker_dictionary, - parameters=self._marker_parameters) - - self._marker_detected_snapshots = list() - # note: detected_indices is (inconsistently) None sometimes if no markers are detected - if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: - detected_marker_count = detected_dictionary_indices.size - # Shape of some output was previously observed to (also) be inconsistent... 
make it consistent here: - detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_marker_count, 4, 2)) - detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_marker_count)) - for detected_marker_index, detected_marker_id in enumerate(detected_dictionary_indices): - if False: # TODO: Re-enable - if detected_marker_id not in self._marker_label_reverse_dictionary: - message: str = \ - f"Found a marker with index {detected_marker_id} "\ - "but it does not appear in the dictionary." - self.add_status_message(severity="error", message=message) - self.set_status(MarkerStatus.FAILURE) - return - marker_label: str = self._marker_label_reverse_dictionary[detected_marker_id] - else: - marker_label: str = str(detected_marker_id) - corner_image_points_px = detected_corner_points_px[detected_marker_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._marker_corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._marker_detected_snapshots.append(MarkerSnapshot( - label=marker_label, - corner_image_points=corner_image_points)) - - self._marker_rejected_snapshots = list() - if rejected_corner_points_raw: - rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) - for rejected_marker_index in range(rejected_corner_points_px.shape[0]): - corner_image_points_px = rejected_corner_points_px[rejected_marker_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._marker_corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._marker_rejected_snapshots.append(MarkerSnapshot( - label=f"unknown", - corner_image_points=corner_image_points)) - - self._marker_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/interfaces/__init__.py b/src/detector/interfaces/__init__.py deleted file mode 100644 index 88c4fb9..0000000 --- a/src/detector/interfaces/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .abstract_camera import AbstractCamera -from .abstract_marker import AbstractMarker diff --git a/src/detector/intrinsic_calibrator.py b/src/detector/intrinsic_calibrator.py index ea8944f..aa711f3 100644 --- a/src/detector/intrinsic_calibrator.py +++ b/src/detector/intrinsic_calibrator.py @@ -1,17 +1,7 @@ -from .exceptions import \ - MCTDetectorRuntimeError -from .structures import \ - CalibratorConfiguration, \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationMap, \ - CalibrationMapValue, \ - CalibrationResultMetadata, \ - CalibrationResultState -from .util import assign_key_value_list_to_aruco_detection_parameters from src.common import \ ImageUtils, \ IOUtils, \ + MCTError, \ StatusMessageSource from src.common.structures import \ CharucoBoardSpecification, \ @@ -21,15 +11,20 @@ IntrinsicParameters, \ KeyValueSimpleAny, \ Vec3 +# TODO: +# Intrinsic Calibration could have different implementations. +# This one depends on ArUco, and it may make sense to make an abstraction. 
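+# One possible shape for that abstraction (names below are illustrative only, not existing API):
+# the calibrator would depend on a small target-detection protocol, with the ArUco/ChArUco
+# logic in ArucoOpenCVAnnotator serving as one implementation, e.g.:
+#
+#     from typing import Protocol
+#     import numpy
+#
+#     class CalibrationTargetDetector(Protocol):
+#         def detect_corners(self, image_greyscale: numpy.ndarray) -> tuple[numpy.ndarray, numpy.ndarray]:
+#             """Return (corner_points_px, corner_ids) found in one greyscale image."""
+#             ...
+#
+#     # calculate() would then call detector.detect_corners(image) for each selected image
+#     # rather than invoking cv2.aruco directly.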
+from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator import cv2 import cv2.aruco import datetime +from enum import StrEnum import json from json import JSONDecodeError import logging import numpy import os -from pydantic import ValidationError +from pydantic import BaseModel, Field, ValidationError from typing import Final import uuid @@ -37,10 +32,100 @@ logger = logging.getLogger(__name__) -class IntrinsicCalibrator: +class MCTIntrinsicCalibrationError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(*args) + self.message = message + + +class _Configuration(BaseModel): + data_path: str = Field() + + +class _ImageState(StrEnum): + IGNORE: Final[str] = "ignore" + SELECT: Final[str] = "select" + DELETE: Final[str] = "delete" # stage for deletion + + +class _ImageMetadata(BaseModel): + identifier: str = Field() + label: str = Field(default_factory=str) + timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + state: _ImageState = Field(default=_ImageState.SELECT) + + +class _ResultState(StrEnum): + # indicate to use this calibration (as opposed to simply storing it) + # normally there shall only ever be one ACTIVE calibration for a given image resolution + ACTIVE: Final[str] = "active" + + # store the calibration, but don't mark it for use + RETAIN: Final[str] = "retain" + + # stage for deletion + DELETE: Final[str] = "delete" + + +class _ResultMetadata(BaseModel): + identifier: str = Field() + label: str = Field(default_factory=str) + timestamp_utc_iso8601: str = Field( + default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + image_identifiers: list[str] = Field(default_factory=list) + state: _ResultState = Field(default=_ResultState.RETAIN) + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class _DataMapValue(BaseModel): + image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) + result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) + + +class _DataMapEntry(BaseModel): + key: ImageResolution = Field() + value: _DataMapValue = Field() - _configuration: CalibratorConfiguration - _calibration_map: dict[ImageResolution, CalibrationMapValue] + +class _DataMap(BaseModel): + entries: list[_DataMapEntry] = Field(default_factory=list) + + def as_dict(self) -> dict[ImageResolution, _DataMapValue]: + return_value: dict[ImageResolution, _DataMapValue] = dict() + for entry in self.entries: + if entry.key not in return_value: + return_value[entry.key] = _DataMapValue() + for image_metadata in entry.value.image_metadata_list: + return_value[entry.key].image_metadata_list.append(image_metadata) + for result_metadata in entry.value.result_metadata_list: + return_value[entry.key].result_metadata_list.append(result_metadata) + return return_value + + @staticmethod + def from_dict(in_dict: dict[ImageResolution, _DataMapValue]): + entries: list[_DataMapEntry] = list() + for key in in_dict.keys(): + entries.append(_DataMapEntry(key=key, value=in_dict[key])) + return _DataMap(entries=entries) + + +class IntrinsicCalibrator: + Configuration: type[_Configuration] = _Configuration + ImageState: type[_ImageState] = _ImageState + ImageMetadata: type[_ImageMetadata] = _ImageMetadata + ResultState: type[_ResultState] = _ResultState + ResultMetadata: type[_ResultMetadata] = _ResultMetadata + DataMap: type[_DataMap] = _DataMap + + class IntrinsicCalibratorConfiguration(BaseModel): + 
data_path: str = Field() + + _configuration: IntrinsicCalibratorConfiguration + _calibration_map: dict[ImageResolution, _DataMapValue] _status_message_source: StatusMessageSource CALIBRATION_MAP_FILENAME: Final[str] = "calibration_map.json" @@ -50,7 +135,7 @@ class IntrinsicCalibrator: def __init__( self, - configuration: CalibratorConfiguration, + configuration: IntrinsicCalibratorConfiguration, status_message_source: StatusMessageSource ): self._configuration = configuration @@ -80,19 +165,19 @@ def add_image( # and that this file does not somehow already exist (highly unlikely) key_path: str = self._path_for_map_key(map_key=map_key) if not self._exists_on_filesystem(path=key_path, pathtype="path", create_path=True): - raise MCTDetectorRuntimeError(message=f"Failed to create storage location for input image.") + raise MCTIntrinsicCalibrationError(message=f"Failed to create storage location for input image.") image_identifier: str = str(uuid.uuid4()) image_filepath = self._image_filepath( map_key=map_key, image_identifier=image_identifier) if os.path.exists(image_filepath): - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image {image_identifier} appears to already exist. This is never expected to occur. " f"Please try again, and if this error continues to occur then please report a bug.") if map_key not in self._calibration_map: - self._calibration_map[map_key] = CalibrationMapValue() + self._calibration_map[map_key] = _DataMapValue() self._calibration_map[map_key].image_metadata_list.append( - CalibrationImageMetadata(identifier=image_identifier)) + IntrinsicCalibrator.ImageMetadata(identifier=image_identifier)) # noinspection PyTypeChecker image_bytes = ImageUtils.image_to_bytes(image_data=image_data, image_format=IntrinsicCalibrator.IMAGE_FORMAT) with (open(image_filepath, 'wb') as in_file): @@ -111,15 +196,21 @@ def calculate( calibration_key: ImageResolution = image_resolution if calibration_key not in self._calibration_map: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"No images for given resolution {str(image_resolution)} found.") + result_identifier: str = str(uuid.uuid4()) + result_filepath = self._result_filepath( + map_key=calibration_key, + result_identifier=result_identifier) + + aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() - mismatched_keys: list[str] = assign_key_value_list_to_aruco_detection_parameters( + mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( detection_parameters=aruco_detector_parameters, key_value_list=marker_parameters) if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") # TODO: ChArUco board to come from somewhere (user? 
currently assumed to be 10x8 DICT4x4_100) @@ -127,12 +218,12 @@ def calculate( # noinspection PyUnresolvedReferences charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() - calibration_value: CalibrationMapValue = self._calibration_map[calibration_key] + calibration_value: _DataMapValue = self._calibration_map[calibration_key] all_charuco_corners = list() all_charuco_ids = list() image_identifiers: list[str] = list() for image_metadata in calibration_value.image_metadata_list: - if image_metadata.state != CalibrationImageState.SELECT: + if image_metadata.state != _ImageState.SELECT: continue image_filepath: str = self._image_filepath( map_key=calibration_key, @@ -172,7 +263,7 @@ def calculate( all_charuco_ids.append(frame_charuco_ids) if len(all_charuco_corners) <= 0: - raise MCTDetectorRuntimeError(message="The input images did not contain visible markers.") + raise MCTIntrinsicCalibrationError(message="The input images did not contain visible markers.") # outputs to be stored in these containers calibration_result = cv2.aruco.calibrateCameraCharucoExtended( @@ -237,11 +328,6 @@ def calculate( z=charuco_extrinsic_stdevs[i*6 + 2, 0]), reprojection_error=charuco_reprojection_errors[i, 0]) for i in range(0, len(charuco_reprojection_errors))]) - - result_identifier: str = str(uuid.uuid4()) - result_filepath = self._result_filepath( - map_key=calibration_key, - result_identifier=result_identifier) IOUtils.json_write( filepath=result_filepath, json_dict=intrinsic_calibration.model_dump(), @@ -250,11 +336,12 @@ def calculate( message=msg), on_error_for_dev=logger.error, ignore_none=True) - result_metadata: CalibrationResultMetadata = CalibrationResultMetadata( + + result_metadata: IntrinsicCalibrator.ResultMetadata = IntrinsicCalibrator.ResultMetadata( identifier=result_identifier, image_identifiers=image_identifiers) if len(self._calibration_map[calibration_key].result_metadata_list) == 0: - result_metadata.state = CalibrationResultState.ACTIVE # No active result yet, so make this one active + result_metadata.state = _ResultState.ACTIVE # No active result yet, so make this one active self._calibration_map[calibration_key].result_metadata_list.append(result_metadata) self.save() return result_identifier, intrinsic_calibration @@ -277,10 +364,10 @@ def _delete_if_exists(self, filepath: str): def delete_staged(self) -> None: for calibration_key in self._calibration_map.keys(): - calibration_value: CalibrationMapValue = self._calibration_map[calibration_key] + calibration_value: _DataMapValue = self._calibration_map[calibration_key] image_indices_to_delete: list = list() for image_index, image in enumerate(calibration_value.image_metadata_list): - if image.state == CalibrationImageState.DELETE: + if image.state == _ImageState.DELETE: self._delete_if_exists(self._image_filepath( map_key=calibration_key, image_identifier=image.identifier)) @@ -289,7 +376,7 @@ def delete_staged(self) -> None: del calibration_value.image_metadata_list[i] result_indices_to_delete: list = list() for result_index, result in enumerate(calibration_value.result_metadata_list): - if result.state == CalibrationResultState.DELETE: + if result.state == _ResultState.DELETE: self._delete_if_exists(self._result_filepath( map_key=calibration_key, result_identifier=result.identifier)) @@ -327,17 +414,17 @@ def get_image( matching_image_resolution = image_resolution break if found_count < 1: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image identifier {image_identifier} is not 
associated with any image.") elif found_count > 1: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image identifier {image_identifier} is associated with multiple images.") image_filepath = self._image_filepath( map_key=matching_image_resolution, image_identifier=image_identifier) if not os.path.exists(image_filepath): - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"File does not exist for image {image_identifier} " f"and given resolution {str(matching_image_resolution)}.") image_bytes: bytes @@ -345,7 +432,7 @@ def get_image( with (open(image_filepath, 'rb') as in_file): image_bytes = in_file.read() except OSError: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Failed to open image {image_identifier} for " f"given resolution {str(matching_image_resolution)}.") image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) @@ -365,10 +452,10 @@ def get_result( matching_image_resolution = image_resolution break if found_count < 1: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image identifier {result_identifier} is not associated with any result.") elif found_count > 1: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image identifier {result_identifier} is associated with multiple results.") return self._get_result_calibration_from_file( @@ -380,33 +467,33 @@ def get_result_active( image_resolution: ImageResolution ) -> IntrinsicCalibration | None: active_count: int = 0 - matched_metadata: CalibrationResultMetadata | None = None + matched_metadata: IntrinsicCalibrator.ResultMetadata | None = None if image_resolution in self._calibration_map: result_count: int = len(self._calibration_map[image_resolution].result_metadata_list) if result_count > 0: matched_metadata = self._calibration_map[image_resolution].result_metadata_list[0] - if matched_metadata.state == CalibrationResultState.ACTIVE: + if matched_metadata.state == _ResultState.ACTIVE: active_count = 1 for result_index in range(1, result_count): result_metadata = self._calibration_map[image_resolution].result_metadata_list[result_index] - if matched_metadata.state == CalibrationResultState.DELETE: + if matched_metadata.state == _ResultState.DELETE: matched_metadata = result_metadata continue # basically we ignore any data staged for DELETE - elif matched_metadata.state == CalibrationResultState.RETAIN: - if result_metadata.state == CalibrationResultState.ACTIVE: + elif matched_metadata.state == _ResultState.RETAIN: + if result_metadata.state == _ResultState.ACTIVE: active_count += 1 matched_metadata = result_metadata continue # ACTIVE shall of course take priority elif result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): matched_metadata = result_metadata else: # matched_result_metadata.state == CalibrationResultState.ACTIVE: - if result_metadata.state == CalibrationResultState.ACTIVE: + if result_metadata.state == _ResultState.ACTIVE: # BOTH metadata are marked ACTIVE. This is not expected to occur. Indicates a problem. 
active_count += 1 if result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): matched_metadata = result_metadata if matched_metadata is None or \ - matched_metadata.state == CalibrationResultState.DELETE: # no result that is not marked DELETE + matched_metadata.state == _ResultState.DELETE: # no result that is not marked DELETE return None if active_count < 1: @@ -435,7 +522,7 @@ def _get_result_calibration_from_file( map_key=image_resolution, result_identifier=result_identifier) if not os.path.exists(result_filepath): - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"File does not exist for result {result_identifier} " f"and given resolution {str(image_resolution)}.") result_json_raw: str @@ -443,14 +530,14 @@ def _get_result_calibration_from_file( with (open(result_filepath, 'r') as in_file): result_json_raw = in_file.read() except OSError: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Failed to open result {result_identifier} for " f"given resolution {str(image_resolution)}.") result_json_dict: dict try: result_json_dict = dict(json.loads(result_json_raw)) except JSONDecodeError: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Failed to parse result {result_identifier} for " f"given resolution {str(image_resolution)}.") intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration(**result_json_dict) @@ -474,8 +561,8 @@ def list_resolutions(self) -> list[ImageResolution]: def list_image_metadata( self, image_resolution: ImageResolution - ) -> list[CalibrationImageMetadata]: - image_metadata_list: list[CalibrationImageMetadata] = list() + ) -> list[ImageMetadata]: + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] = list() map_key: ImageResolution = image_resolution if map_key in self._calibration_map: image_metadata_list = self._calibration_map[map_key].image_metadata_list @@ -485,8 +572,8 @@ def list_image_metadata( def list_result_metadata( self, image_resolution: ImageResolution - ) -> list[CalibrationResultMetadata]: - result_metadata_list: list[CalibrationResultMetadata] = list() + ) -> list[ResultMetadata]: + result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] = list() map_key: ImageResolution = image_resolution if map_key in self._calibration_map: result_metadata_list = self._calibration_map[map_key].result_metadata_list @@ -520,9 +607,9 @@ def load(self) -> bool: severity="error", message="Failed to load calibration map from file.") return False - calibration_map: CalibrationMap + calibration_map: IntrinsicCalibrator.DataMap try: - calibration_map = CalibrationMap(**json_dict) + calibration_map = IntrinsicCalibrator.DataMap(**json_dict) except ValidationError as e: logger.error(e) self._status_message_source.enqueue_status_message( @@ -554,7 +641,7 @@ def _result_filepath( def save(self) -> None: IOUtils.json_write( filepath=self._map_filepath(), - json_dict=CalibrationMap.from_dict(self._calibration_map).model_dump(), + json_dict=IntrinsicCalibrator.DataMap.from_dict(self._calibration_map).model_dump(), on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( severity="error", message=msg), @@ -564,7 +651,7 @@ def save(self) -> None: def update_image_metadata( self, image_identifier: str, - image_state: CalibrationImageState, + image_state: ImageState, image_label: str | None ) -> None: found_count: int = 0 @@ -577,7 +664,7 @@ def update_image_metadata( found_count += 1 break if found_count < 1: - raise 
MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Image identifier {image_identifier} is not associated with any image.") elif found_count > 1: self._status_message_source.enqueue_status_message( @@ -589,7 +676,7 @@ def update_image_metadata( def update_result_metadata( self, result_identifier: str, - result_state: CalibrationResultState, + result_state: ResultState, result_label: str | None = None ) -> None: found_count: int = 0 @@ -605,16 +692,16 @@ def update_result_metadata( break # Some cleanup as applicable - if result_state == CalibrationResultState.ACTIVE: + if result_state == _ResultState.ACTIVE: for map_key in matching_map_keys: # If size greater than 1, something is wrong... but nonetheless # we'll ensure there is only one active result per resolution for result in self._calibration_map[map_key].result_metadata_list: - if result.identifier != result_identifier and result.state == CalibrationResultState.ACTIVE: - result.state = CalibrationResultState.RETAIN + if result.identifier != result_identifier and result.state == _ResultState.ACTIVE: + result.state = _ResultState.RETAIN if found_count < 1: - raise MCTDetectorRuntimeError( + raise MCTIntrinsicCalibrationError( message=f"Result identifier {result_identifier} is not associated with any result.") elif found_count > 1: self._status_message_source.enqueue_status_message( diff --git a/src/detector/structures/__init__.py b/src/detector/structures/__init__.py deleted file mode 100644 index 8d5f961..0000000 --- a/src/detector/structures/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from .calibration_map import \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationMap, \ - CalibrationMapEntry, \ - CalibrationMapValue, \ - CalibrationResultMetadata, \ - CalibrationResultState -from .detector_configuration import \ - DetectorConfiguration, \ - CalibratorConfiguration, \ - CameraConfiguration, \ - MarkerConfiguration -from .status import \ - CameraStatus, \ - MarkerStatus diff --git a/src/detector/structures/calibration_map.py b/src/detector/structures/calibration_map.py deleted file mode 100644 index 4aebaed..0000000 --- a/src/detector/structures/calibration_map.py +++ /dev/null @@ -1,73 +0,0 @@ -from src.common.structures import ImageResolution -import datetime -from enum import StrEnum -from pydantic import BaseModel, Field -from typing import Final - - -class CalibrationImageState(StrEnum): - IGNORE: Final[int] = "ignore" - SELECT: Final[int] = "select" - DELETE: Final[int] = "delete" # stage for deletion - - -class CalibrationImageMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - state: CalibrationImageState = Field(default=CalibrationImageState.SELECT) - - -class CalibrationResultState(StrEnum): - # indicate to use this calibration (as opposed to simply storing it) - # normally there shall only ever be one ACTIVE calibration for a given image resolution - ACTIVE: Final[str] = "active" - - # store the calibration, but don't mark it for use - RETAIN: Final[str] = "retain" - - # stage for deletion - DELETE: Final[str] = "delete" - - -class CalibrationResultMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) - timestamp_utc_iso8601: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - image_identifiers: list[str] = Field(default_factory=list) - state: 
CalibrationResultState = Field(default=CalibrationResultState.RETAIN) - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) - - -class CalibrationMapValue(BaseModel): - image_metadata_list: list[CalibrationImageMetadata] = Field(default_factory=list) - result_metadata_list: list[CalibrationResultMetadata] = Field(default_factory=list) - - -class CalibrationMapEntry(BaseModel): - key: ImageResolution = Field() - value: CalibrationMapValue = Field() - - -class CalibrationMap(BaseModel): - entries: list[CalibrationMapEntry] = Field(default_factory=list) - - def as_dict(self) -> dict[ImageResolution, CalibrationMapValue]: - return_value: dict[ImageResolution, CalibrationMapValue] = dict() - for entry in self.entries: - if entry.key not in return_value: - return_value[entry.key] = CalibrationMapValue() - for image_metadata in entry.value.image_metadata_list: - return_value[entry.key].image_metadata_list.append(image_metadata) - for result_metadata in entry.value.result_metadata_list: - return_value[entry.key].result_metadata_list.append(result_metadata) - return return_value - - @staticmethod - def from_dict(in_dict: dict[ImageResolution, CalibrationMapValue]): - entries: list[CalibrationMapEntry] = list() - for key in in_dict.keys(): - entries.append(CalibrationMapEntry(key=key, value=in_dict[key])) - return CalibrationMap(entries=entries) diff --git a/src/detector/structures/detector_configuration.py b/src/detector/structures/detector_configuration.py deleted file mode 100644 index a4537f3..0000000 --- a/src/detector/structures/detector_configuration.py +++ /dev/null @@ -1,24 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Union - - -class CalibratorConfiguration(BaseModel): - data_path: str = Field() - - -class CameraConfiguration(BaseModel): - driver: str = Field() - capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) - - -class MarkerConfiguration(BaseModel): - method: str = Field() - - -class DetectorConfiguration(BaseModel): - """ - Top-level schema for Detector initialization data - """ - calibrator_configuration: CalibratorConfiguration = Field() - camera_configuration: CameraConfiguration = Field() - marker_configuration: MarkerConfiguration = Field() diff --git a/src/detector/structures/status.py b/src/detector/structures/status.py deleted file mode 100644 index 91a98cc..0000000 --- a/src/detector/structures/status.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Final -from enum import StrEnum - - -class CameraStatus(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" - - -class MarkerStatus(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" diff --git a/src/detector/util.py b/src/detector/util.py deleted file mode 100644 index d86a3a5..0000000 --- a/src/detector/util.py +++ /dev/null @@ -1,467 +0,0 @@ -from .exceptions import MCTDetectorRuntimeError -from src.common.structures import \ - CornerRefinementMethod, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ - KeyValueMetaAny, \ - KeyValueMetaBool, \ - KeyValueMetaEnum, \ - KeyValueMetaFloat, \ - KeyValueMetaInt, \ - KeyValueSimpleAbstract, \ - KeyValueSimpleAny, \ - KeyValueSimpleBool, \ - KeyValueSimpleFloat, \ - KeyValueSimpleInt, \ - KeyValueSimpleString -import logging -import numpy -from typing import Final, get_args - - -logger = 
logging.getLogger(__name__) - - -# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html -# for documentation on individual parameters - -# Adaptive Thresholding -KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" -KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" -KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" -KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" -# Contour Filtering -KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio -KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" -KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square tolerance ratio -KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio -KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio -KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels -# Bits Extraction -KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (px) -KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev -KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate -KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio -# Marker Identification -KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate -KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rat -KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" -KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" -KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" -KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" -KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" -# April Tag Only -KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" -KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" -KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" -KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" -KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" -KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" -KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" -KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" -# ArUco 3 -KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" -KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = "minMarkerLengthRatioOriginalImg" -KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" - - -def assign_aruco_detection_parameters_to_key_value_list( - detection_parameters: ... 
# cv2.aruco.DetectionParameters -) -> list[KeyValueMetaAny]: - - return_value: list[KeyValueMetaAny] = list() - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, - value=detection_parameters.adaptiveThreshWinSizeMin, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, - value=detection_parameters.adaptiveThreshWinSizeMax, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, - value=detection_parameters.adaptiveThreshWinSizeStep, - range_minimum=1, - range_maximum=99, - range_step=2)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ADAPTIVE_THRESH_CONSTANT, - value=detection_parameters.adaptiveThreshConstant, - range_minimum=-255.0, - range_maximum=255.0, - range_step=1.0)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_PERIMETER_RATE, - value=detection_parameters.minMarkerPerimeterRate, - range_minimum=0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_MARKER_PERIMETER_RATE, - value=detection_parameters.maxMarkerPerimeterRate, - range_minimum=0.0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_POLYGONAL_APPROX_ACCURACY_RATE, - value=detection_parameters.polygonalApproxAccuracyRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_CORNER_DISTANCE_RATE, - value=detection_parameters.minCornerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_DISTANCE_RATE, - value=detection_parameters.minMarkerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_MIN_DISTANCE_TO_BORDER, - value=detection_parameters.minDistanceToBorder, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_MARKER_BORDER_BITS, - value=detection_parameters.markerBorderBits, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_OTSU_STDDEV, - value=detection_parameters.minOtsuStdDev, - range_minimum=0.0, - range_maximum=256.0, - range_step=1.0)) - - return_value.append(KeyValueMetaInt( - key=KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, - value=detection_parameters.perspectiveRemovePixelPerCell, - range_minimum=1, - range_maximum=20)) - - return_value.append(KeyValueMetaFloat( - key=KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, - value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, - range_minimum=0.0, - range_maximum=0.5, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, - value=detection_parameters.maxErroneousBitsInBorderRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ERROR_CORRECTION_RATE, - value=detection_parameters.errorCorrectionRate, - range_minimum=-0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaBool( - key=KEY_DETECT_INVERTED_MARKER, - value=detection_parameters.detectInvertedMarker)) - - if detection_parameters.cornerRefinementMethod not in CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: - message: str = f"Corner refinement method appears to be set to an invalid value: " \ - f"{detection_parameters.corner_refinement_method}." 
- logger.error(message) - raise MCTDetectorRuntimeError(message=message) - corner_refinement_method_text: CornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[detection_parameters.cornerRefinementMethod] - return_value.append(KeyValueMetaEnum( - key=KEY_CORNER_REFINEMENT_METHOD, - value=corner_refinement_method_text, - allowable_values=get_args(CornerRefinementMethod))) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_WIN_SIZE, - value=detection_parameters.cornerRefinementWinSize, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_MAX_ITERATIONS, - value=detection_parameters.cornerRefinementMaxIterations, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaFloat( - key=KEY_CORNER_REFINEMENT_MIN_ACCURACY, - value=detection_parameters.cornerRefinementMinAccuracy, - range_minimum=0.0, - range_maximum=5.0, - range_step=0.1)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_CRITICAL_RAD, - value=detection_parameters.aprilTagCriticalRad, - range_minimum=-0.0, - range_maximum=numpy.pi, - range_step=numpy.pi / 20.0)) - - return_value.append(KeyValueMetaBool( - key=KEY_APRIL_TAG_DEGLITCH, - value=detection_parameters.aprilTagDeglitch)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_MAX_LINE_FIT_MSE, - value=detection_parameters.aprilTagMaxLineFitMse, - range_minimum=0.0, - range_maximum=512.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MAX_N_MAXIMA, - value=detection_parameters.aprilTagMaxNmaxima, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, - value=detection_parameters.aprilTagMinClusterPixels, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, - value=detection_parameters.aprilTagMinWhiteBlackDiff, - range_minimum=0, - range_maximum=256)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_DECIMATE, - value=detection_parameters.aprilTagQuadDecimate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_SIGMA, - value=detection_parameters.aprilTagQuadSigma, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - # Note: a relatively recent addition to OpenCV, may not be available in some python versions - if hasattr(detection_parameters, "useAruco3Detection"): - return_value.append(KeyValueMetaBool( - key=KEY_USE_ARUCO_3_DETECTION, - value=detection_parameters.useAruco3Detection)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, - value=detection_parameters.minMarkerLengthRatioOriginalImg, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, - value=detection_parameters.minSideLengthCanonicalImg, - range_minimum=1, - range_maximum=512)) - - return return_value - - -def assign_key_value_list_to_aruco_detection_parameters( - detection_parameters: ..., # cv2.aruco.DetectionParameters - key_value_list: list[KeyValueSimpleAny] -) -> list[str]: - """ - Returns list of mismatched keys - """ - mismatched_keys: list[str] = list() - key_value: KeyValueSimpleAbstract - for key_value in key_value_list: - if key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: - if not isinstance(key_value, KeyValueSimpleInt): - 
mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMin = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMax = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeStep = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_CONSTANT: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshConstant = key_value.value - elif key_value.key == KEY_MIN_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_MAX_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_POLYGONAL_APPROX_ACCURACY_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.polygonalApproxAccuracyRate = key_value.value - elif key_value.key == KEY_MIN_CORNER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minCornerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_MARKER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_DISTANCE_TO_BORDER: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minDistanceToBorder = key_value.value - elif key_value.key == KEY_MARKER_BORDER_BITS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.markerBorderBits = key_value.value - elif key_value.key == KEY_MIN_OTSU_STDDEV: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minOtsuStdDev = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemovePixelPerCell = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value - elif key_value.key == KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxErroneousBitsInBorderRate = key_value.value - elif key_value.key == KEY_ERROR_CORRECTION_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.errorCorrectionRate = key_value.value - elif key_value.key == KEY_DETECT_INVERTED_MARKER: - if not 
isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.detectInvertedMarker = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_METHOD: - if not isinstance(key_value, KeyValueSimpleString): - mismatched_keys.append(key_value.key) - continue - corner_refinement_method: str = key_value.value - if corner_refinement_method in CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: - # noinspection PyTypeChecker - detection_parameters.cornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[corner_refinement_method] - else: - raise MCTDetectorRuntimeError( - message=f"Failed to find corner refinement method {corner_refinement_method}.") - elif key_value.key == KEY_CORNER_REFINEMENT_WIN_SIZE: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementWinSize = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MAX_ITERATIONS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMaxIterations = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MIN_ACCURACY: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMinAccuracy = key_value.value - elif key_value.key == KEY_APRIL_TAG_CRITICAL_RAD: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagCriticalRad = key_value.value - elif key_value.key == KEY_APRIL_TAG_DEGLITCH: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagDeglitch = int(key_value.value) - elif key_value.key == KEY_APRIL_TAG_MAX_LINE_FIT_MSE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxLineFitMse = key_value.value - elif key_value.key == KEY_APRIL_TAG_MAX_N_MAXIMA: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxNmaxima = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinClusterPixels = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_DECIMATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagQuadDecimate = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_SIGMA: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagQuadSigma = key_value.value - elif key_value.key == KEY_USE_ARUCO_3_DETECTION: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.useAruco3Detection = key_value.value - elif key_value.key == KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - 
continue - detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value - elif key_value.key == KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minSideLengthCanonicalImg = key_value.value - else: - mismatched_keys.append(key_value.key) - return mismatched_keys diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index c828654..312a1a0 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -20,7 +20,7 @@ ImageResolution from src.controller import \ MCTController -from src.detector.api import \ +from src.detector import \ CalibrationCalculateRequest, \ CalibrationCalculateResponse, \ CalibrationDeleteStagedRequest, \ @@ -35,12 +35,8 @@ CalibrationResultGetResponse, \ CalibrationResultMetadataListRequest, \ CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest -from src.detector.structures import \ - CalibrationImageMetadata, \ - CalibrationImageState, \ - CalibrationResultMetadata, \ - CalibrationResultState + CalibrationResultMetadataUpdateRequest, \ + IntrinsicCalibrator from io import BytesIO import logging from typing import Optional @@ -77,8 +73,8 @@ class CalibratorPanel(BasePanel): _calibration_in_progress: bool _force_last_result_selected: bool _detector_resolutions: list[ImageResolution] - _image_metadata_list: list[CalibrationImageMetadata] - _result_metadata_list: list[CalibrationResultMetadata] + _image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + _result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] def __init__( self, @@ -155,7 +151,7 @@ def __init__( parent=control_panel, sizer=control_sizer, label="Image State", - selectable_values=[state.name for state in CalibrationImageState]) + selectable_values=[state.name for state in IntrinsicCalibrator.ImageState]) self._image_update_button: wx.Button = self.add_control_button( parent=control_panel, @@ -208,7 +204,7 @@ def __init__( parent=control_panel, sizer=control_sizer, label="Result State", - selectable_values=[state.name for state in CalibrationResultState]) + selectable_values=[state.name for state in IntrinsicCalibrator.ResultState]) self._result_update_button: wx.Button = self.add_control_button( parent=control_panel, @@ -470,8 +466,8 @@ def _on_image_update_pressed(self, _event: wx.CommandEvent) -> None: ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) image_index: int = self._image_table.get_selected_row_index() image_identifier: str = self._image_metadata_list[image_index].identifier - image_state: CalibrationImageState = \ - CalibrationImageState[self._image_state_selector.selector.GetStringSelection()] + image_state: IntrinsicCalibrator.ImageState = \ + IntrinsicCalibrator.ImageState[self._image_state_selector.selector.GetStringSelection()] image_label: str = self._image_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ CalibrationImageMetadataUpdateRequest( @@ -508,8 +504,8 @@ def _on_result_update_pressed(self, _event: wx.CommandEvent) -> None: ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) result_index: int = self._result_table.get_selected_row_index() result_identifier: str = self._result_metadata_list[result_index].identifier - result_state: CalibrationResultState = \ - CalibrationResultState[self._result_state_selector.selector.GetStringSelection()] 
+ result_state: IntrinsicCalibrator.ResultState = \ + IntrinsicCalibrator.ResultState[self._result_state_selector.selector.GetStringSelection()] result_label: str = self._result_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ CalibrationResultMetadataUpdateRequest( @@ -564,7 +560,7 @@ def _update_ui_controls(self) -> None: message=f"Selected image index {image_index} is out of bounds. Setting to None.") self._image_table.set_selected_row_index(None) else: - image_metadata: CalibrationImageMetadata = self._image_metadata_list[image_index] + image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index] self._image_label_textbox.Enable(True) self._image_label_textbox.textbox.SetValue(image_metadata.label) self._image_state_selector.Enable(True) @@ -572,7 +568,7 @@ def _update_ui_controls(self) -> None: self._image_update_button.Enable(True) calibration_image_count: int = 0 for image_metadata in self._image_metadata_list: - if image_metadata.state == CalibrationImageState.SELECT: + if image_metadata.state == IntrinsicCalibrator.ImageState.SELECT: calibration_image_count += 1 if calibration_image_count > 0: self._calibrate_button.Enable(True) @@ -592,7 +588,7 @@ def _update_ui_controls(self) -> None: message=f"Selected result index {result_index} is out of bounds. Setting to None.") self._result_table.set_selected_row_index(None) else: - result_metadata: CalibrationResultMetadata = self._result_metadata_list[result_index] + result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index] self._result_display_textbox.Enable(True) self._result_label_textbox.Enable(True) self._result_label_textbox.textbox.SetValue(result_metadata.label) diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 55e9bbd..7426859 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -32,9 +32,9 @@ CameraParametersGetResponse, \ CameraParametersSetRequest, \ CameraParametersSetResponse, \ - MarkerParametersGetRequest, \ - MarkerParametersGetResponse, \ - MarkerParametersSetRequest + AnnotatorParametersGetRequest, \ + AnnotatorParametersGetResponse, \ + AnnotatorParametersSetRequest import cv2 from io import BytesIO import logging @@ -300,7 +300,7 @@ def begin_get_detector_parameters(self): request_series: MCTRequestSeries = MCTRequestSeries( series=[ CameraParametersGetRequest(), - MarkerParametersGetRequest()]) + AnnotatorParametersGetRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -324,8 +324,8 @@ def begin_set_detection_parameters(self): key_values: list[KeyValueSimpleAny] = self.populate_key_value_list_from_dynamic_ui( parameter_uis=self._marker_parameter_uis) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - MarkerParametersSetRequest(parameters=key_values), - MarkerParametersGetRequest()]) # sync + AnnotatorParametersSetRequest(parameters=key_values), + AnnotatorParametersGetRequest()]) # sync self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -345,7 +345,7 @@ def handle_response_series( self._handle_capture_snapshot_response(response=response) elif isinstance(response, CameraParametersGetResponse): self._handle_get_capture_parameters_response(response=response) - elif isinstance(response, MarkerParametersGetResponse): + elif 
isinstance(response, AnnotatorParametersGetResponse): self._handle_get_detection_parameters_response(response=response) elif isinstance(response, ErrorResponse): self.handle_error_response(response=response) @@ -386,7 +386,7 @@ def _handle_get_capture_parameters_response( # noinspection DuplicatedCode def _handle_get_detection_parameters_response( self, - response: MarkerParametersGetResponse + response: AnnotatorParametersGetResponse ): self._marker_parameter_panel.Freeze() self._marker_parameter_sizer.Clear(True) diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index a4449a3..440d623 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector.structures import CalibrationImageMetadata +from src.detector import IntrinsicCalibrator from typing import Final import wx @@ -12,7 +12,7 @@ _COL_LABELS: Final[list[str]] = ["Identifier", "Label", "Timestamp", "Status"] -class CalibrationImageTable(RowSelectionTable[CalibrationImageMetadata]): +class CalibrationImageTable(RowSelectionTable[IntrinsicCalibrator.ImageMetadata]): def __init__( self, parent: wx.Window, @@ -26,7 +26,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: CalibrationImageMetadata + row_content: IntrinsicCalibrator.ImageMetadata ): self.table.SetCellValue( row=row_index, diff --git a/src/gui/panels/specialized/calibration_result_table.py b/src/gui/panels/specialized/calibration_result_table.py index e782e12..778f1ab 100644 --- a/src/gui/panels/specialized/calibration_result_table.py +++ b/src/gui/panels/specialized/calibration_result_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector.structures import CalibrationResultMetadata +from src.detector import IntrinsicCalibrator from typing import Final import wx @@ -12,7 +12,7 @@ _COL_LABELS: Final[list[str]] = ["Identifier", "Label", "Timestamp", "Status"] -class CalibrationResultTable(RowSelectionTable[CalibrationResultMetadata]): +class CalibrationResultTable(RowSelectionTable[IntrinsicCalibrator.ResultMetadata]): def __init__( self, parent: wx.Window, @@ -26,7 +26,7 @@ def __init__( def _set_row_contents( self, row_index: int, - row_content: CalibrationResultMetadata + row_content: IntrinsicCalibrator.ResultMetadata ): self.table.SetCellValue( row=row_index, diff --git a/src/detector/implementations/__init__.py b/src/implementations/__init__.py similarity index 100% rename from src/detector/implementations/__init__.py rename to src/implementations/__init__.py diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py new file mode 100644 index 0000000..ee16a19 --- /dev/null +++ b/src/implementations/annotator_aruco_opencv.py @@ -0,0 +1,586 @@ +from src.common import \ + Annotator, \ + MCTAnnotatorRuntimeError, \ + StatusMessageSource +from src.common.structures import \ + CornerRefinementMethod, \ + CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ + CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ + KeyValueMetaAny, \ + KeyValueMetaBool, \ + KeyValueMetaEnum, \ + KeyValueMetaFloat, \ + KeyValueMetaInt, \ + KeyValueSimpleAbstract, \ + KeyValueSimpleAny, \ + KeyValueSimpleBool, \ + KeyValueSimpleFloat, \ + KeyValueSimpleInt, \ + KeyValueSimpleString, \ + MarkerCornerImagePoint, \ + MarkerSnapshot +import cv2.aruco +import datetime 
+import logging +import numpy +from typing import Any, Final, get_args + + +logger = logging.getLogger(__name__) + + +# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html +# for documentation on individual parameters + +# Adaptive Thresholding +KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" +KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" +KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" +KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" +# Contour Filtering +KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio +KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" +KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square tolerance ratio +KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio +KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio +KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels +# Bits Extraction +KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (bits) +KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev +KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate +KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio +# Marker Identification +KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate +KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rate +KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" +KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" +KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" +KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" +KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" +# April Tag Only +KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" +KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" +KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" +KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" +KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" +KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" +KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" +KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" +# ArUco 3 +KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" +KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = "minMarkerLengthRatioOriginalImg" +KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" + + +class ArucoOpenCVAnnotator(Annotator): + + _aruco_dictionary: Any | None # created by OpenCV, type cv2.aruco.Dictionary + _aruco_parameters: Any # created by OpenCV, type cv2.aruco.DetectorParameters + _snapshots_identified: list[MarkerSnapshot] # Markers that were detected and successfully identified + _snapshots_unidentified: list[MarkerSnapshot] # Candidate detections that resembled markers but were rejected during identification + _update_timestamp_utc: datetime.datetime + + def __init__( + self, + configuration: Annotator.Configuration, + status_message_source: StatusMessageSource + ): + super().__init__( + configuration=configuration, +
status_message_source=status_message_source) + + self._aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) + self._aruco_parameters = cv2.aruco.DetectorParameters() + self._snapshots_identified = list() + self._snapshots_unidentified = list() + self._update_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + self.set_status(Annotator.Status.RUNNING) # Always running + + @staticmethod + def assign_aruco_detection_parameters_to_key_value_list( + detection_parameters: ... # cv2.aruco.DetectionParameters + ) -> list[KeyValueMetaAny]: + + return_value: list[KeyValueMetaAny] = list() + + return_value.append(KeyValueMetaInt( + key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, + value=detection_parameters.adaptiveThreshWinSizeMin, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, + value=detection_parameters.adaptiveThreshWinSizeMax, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, + value=detection_parameters.adaptiveThreshWinSizeStep, + range_minimum=1, + range_maximum=99, + range_step=2)) + + return_value.append(KeyValueMetaFloat( + key=KEY_ADAPTIVE_THRESH_CONSTANT, + value=detection_parameters.adaptiveThreshConstant, + range_minimum=-255.0, + range_maximum=255.0, + range_step=1.0)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MIN_MARKER_PERIMETER_RATE, + value=detection_parameters.minMarkerPerimeterRate, + range_minimum=0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MAX_MARKER_PERIMETER_RATE, + value=detection_parameters.maxMarkerPerimeterRate, + range_minimum=0.0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_POLYGONAL_APPROX_ACCURACY_RATE, + value=detection_parameters.polygonalApproxAccuracyRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MIN_CORNER_DISTANCE_RATE, + value=detection_parameters.minCornerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MIN_MARKER_DISTANCE_RATE, + value=detection_parameters.minMarkerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=KEY_MIN_DISTANCE_TO_BORDER, + value=detection_parameters.minDistanceToBorder, + range_minimum=0, + range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=KEY_MARKER_BORDER_BITS, + value=detection_parameters.markerBorderBits, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MIN_OTSU_STDDEV, + value=detection_parameters.minOtsuStdDev, + range_minimum=0.0, + range_maximum=256.0, + range_step=1.0)) + + return_value.append(KeyValueMetaInt( + key=KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, + value=detection_parameters.perspectiveRemovePixelPerCell, + range_minimum=1, + range_maximum=20)) + + return_value.append(KeyValueMetaFloat( + key=KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, + value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, + range_minimum=0.0, + range_maximum=0.5, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, + value=detection_parameters.maxErroneousBitsInBorderRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + 
key=KEY_ERROR_CORRECTION_RATE, + value=detection_parameters.errorCorrectionRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaBool( + key=KEY_DETECT_INVERTED_MARKER, + value=detection_parameters.detectInvertedMarker)) + + if detection_parameters.cornerRefinementMethod not in CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: + message: str = f"Corner refinement method appears to be set to an invalid value: " \ + f"{detection_parameters.cornerRefinementMethod}." + logger.error(message) + raise MCTAnnotatorRuntimeError(message=message) + corner_refinement_method_text: CornerRefinementMethod = \ + CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[detection_parameters.cornerRefinementMethod] + return_value.append(KeyValueMetaEnum( + key=KEY_CORNER_REFINEMENT_METHOD, + value=corner_refinement_method_text, + allowable_values=get_args(CornerRefinementMethod))) + + return_value.append(KeyValueMetaInt( + key=KEY_CORNER_REFINEMENT_WIN_SIZE, + value=detection_parameters.cornerRefinementWinSize, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaInt( + key=KEY_CORNER_REFINEMENT_MAX_ITERATIONS, + value=detection_parameters.cornerRefinementMaxIterations, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaFloat( + key=KEY_CORNER_REFINEMENT_MIN_ACCURACY, + value=detection_parameters.cornerRefinementMinAccuracy, + range_minimum=0.0, + range_maximum=5.0, + range_step=0.1)) + + return_value.append(KeyValueMetaFloat( + key=KEY_APRIL_TAG_CRITICAL_RAD, + value=detection_parameters.aprilTagCriticalRad, + range_minimum=0.0, + range_maximum=numpy.pi, + range_step=numpy.pi / 20.0)) + + return_value.append(KeyValueMetaBool( + key=KEY_APRIL_TAG_DEGLITCH, + value=detection_parameters.aprilTagDeglitch)) + + return_value.append(KeyValueMetaFloat( + key=KEY_APRIL_TAG_MAX_LINE_FIT_MSE, + value=detection_parameters.aprilTagMaxLineFitMse, + range_minimum=0.0, + range_maximum=512.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=KEY_APRIL_TAG_MAX_N_MAXIMA, + value=detection_parameters.aprilTagMaxNmaxima, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaInt( + key=KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, + value=detection_parameters.aprilTagMinClusterPixels, + range_minimum=0, + range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, + value=detection_parameters.aprilTagMinWhiteBlackDiff, + range_minimum=0, + range_maximum=256)) + + return_value.append(KeyValueMetaFloat( + key=KEY_APRIL_TAG_QUAD_DECIMATE, + value=detection_parameters.aprilTagQuadDecimate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=KEY_APRIL_TAG_QUAD_SIGMA, + value=detection_parameters.aprilTagQuadSigma, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + # Note: useAruco3Detection is a relatively recent addition to OpenCV and may not be available in older opencv-python builds + if hasattr(detection_parameters, "useAruco3Detection"): + return_value.append(KeyValueMetaBool( + key=KEY_USE_ARUCO_3_DETECTION, + value=detection_parameters.useAruco3Detection)) + + return_value.append(KeyValueMetaFloat( + key=KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, + value=detection_parameters.minMarkerLengthRatioOriginalImg, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, + value=detection_parameters.minSideLengthCanonicalImg, + range_minimum=1, +
range_maximum=512)) + + return return_value + + @staticmethod + def assign_key_value_list_to_aruco_detection_parameters( + detection_parameters: ..., # cv2.aruco.DetectionParameters + key_value_list: list[KeyValueSimpleAny] + ) -> list[str]: + """ + Returns list of mismatched keys + """ + mismatched_keys: list[str] = list() + key_value: KeyValueSimpleAbstract + for key_value in key_value_list: + if key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMin = key_value.value + elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMax = key_value.value + elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeStep = key_value.value + elif key_value.key == KEY_ADAPTIVE_THRESH_CONSTANT: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshConstant = key_value.value + elif key_value.key == KEY_MIN_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerPerimeterRate = key_value.value + elif key_value.key == KEY_MAX_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxMarkerPerimeterRate = key_value.value + elif key_value.key == KEY_POLYGONAL_APPROX_ACCURACY_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.polygonalApproxAccuracyRate = key_value.value + elif key_value.key == KEY_MIN_CORNER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minCornerDistanceRate = key_value.value + elif key_value.key == KEY_MIN_MARKER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerDistanceRate = key_value.value + elif key_value.key == KEY_MIN_DISTANCE_TO_BORDER: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minDistanceToBorder = key_value.value + elif key_value.key == KEY_MARKER_BORDER_BITS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.markerBorderBits = key_value.value + elif key_value.key == KEY_MIN_OTSU_STDDEV: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minOtsuStdDev = key_value.value + elif key_value.key == KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemovePixelPerCell = key_value.value + elif key_value.key == KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value + elif 
key_value.key == KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxErroneousBitsInBorderRate = key_value.value + elif key_value.key == KEY_ERROR_CORRECTION_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.errorCorrectionRate = key_value.value + elif key_value.key == KEY_DETECT_INVERTED_MARKER: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.detectInvertedMarker = key_value.value + elif key_value.key == KEY_CORNER_REFINEMENT_METHOD: + if not isinstance(key_value, KeyValueSimpleString): + mismatched_keys.append(key_value.key) + continue + corner_refinement_method: str = key_value.value + if corner_refinement_method in CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: + # noinspection PyTypeChecker + detection_parameters.cornerRefinementMethod = \ + CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[corner_refinement_method] + else: + raise MCTAnnotatorRuntimeError( + message=f"Failed to find corner refinement method {corner_refinement_method}.") + elif key_value.key == KEY_CORNER_REFINEMENT_WIN_SIZE: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementWinSize = key_value.value + elif key_value.key == KEY_CORNER_REFINEMENT_MAX_ITERATIONS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMaxIterations = key_value.value + elif key_value.key == KEY_CORNER_REFINEMENT_MIN_ACCURACY: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMinAccuracy = key_value.value + elif key_value.key == KEY_APRIL_TAG_CRITICAL_RAD: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagCriticalRad = key_value.value + elif key_value.key == KEY_APRIL_TAG_DEGLITCH: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagDeglitch = int(key_value.value) + elif key_value.key == KEY_APRIL_TAG_MAX_LINE_FIT_MSE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMaxLineFitMse = key_value.value + elif key_value.key == KEY_APRIL_TAG_MAX_N_MAXIMA: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMaxNmaxima = key_value.value + elif key_value.key == KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinClusterPixels = key_value.value + elif key_value.key == KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value + elif key_value.key == KEY_APRIL_TAG_QUAD_DECIMATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadDecimate = key_value.value + elif key_value.key == KEY_APRIL_TAG_QUAD_SIGMA: + if not isinstance(key_value, 
KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadSigma = key_value.value + elif key_value.key == KEY_USE_ARUCO_3_DETECTION: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.useAruco3Detection = key_value.value + elif key_value.key == KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value + elif key_value.key == KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minSideLengthCanonicalImg = key_value.value + else: + mismatched_keys.append(key_value.key) + return mismatched_keys + + def get_changed_timestamp(self) -> datetime.datetime: + return self._update_timestamp_utc + + def get_markers_detected(self) -> list[MarkerSnapshot]: + return self._snapshots_identified + + def get_markers_rejected(self) -> list[MarkerSnapshot]: + return self._snapshots_unidentified + + def get_parameters(self) -> list[KeyValueMetaAny]: + return self.assign_aruco_detection_parameters_to_key_value_list(self._aruco_parameters) + + @staticmethod + def get_type_identifier() -> str: + return "aruco_opencv" + + @staticmethod + def _corner_image_point_list_from_embedded_list( + corner_image_points_px: list[list[float]] + ) -> list[MarkerCornerImagePoint]: + corner_image_point_list: list[MarkerCornerImagePoint] = list() + assert len(corner_image_points_px) == 4 + for corner_image_point_px in corner_image_points_px: + corner_image_point_list.append(MarkerCornerImagePoint( + x_px=corner_image_point_px[0], + y_px=corner_image_point_px[1])) + return corner_image_point_list + + # noinspection DuplicatedCode + def set_parameters( + self, + parameters: list[KeyValueSimpleAny] + ) -> None: + mismatched_keys: list[str] = self.assign_key_value_list_to_aruco_detection_parameters( + detection_parameters=self._aruco_parameters, + key_value_list=parameters) + if len(mismatched_keys) > 0: + raise MCTAnnotatorRuntimeError( + message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") + + def update( + self, + image: numpy.ndarray + ) -> None: + if self._aruco_dictionary is None: + message: str = "No ArUco dictionary has been set." + self.add_status_message(severity="error", message=message) + self.set_status(Annotator.Status.FAILURE) + return + + image_greyscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( + image=image_greyscale, + dictionary=self._aruco_dictionary, + parameters=self._aruco_parameters) + + self._snapshots_identified = list() + # note: detected_indices is (inconsistently) None sometimes if nothing is detected + if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: + detected_count = detected_dictionary_indices.size + # Shape of some output was previously observed to (also) be inconsistent... 
make it consistent here: + detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_count, 4, 2)) + detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) + for detected_index, detected_id in enumerate(detected_dictionary_indices): + detected_label: str = str(detected_id) + corner_image_points_px = detected_corner_points_px[detected_index] + corner_image_points: list[MarkerCornerImagePoint] = \ + self._corner_image_point_list_from_embedded_list( + corner_image_points_px=corner_image_points_px.tolist()) + self._snapshots_identified.append(MarkerSnapshot( + label=detected_label, + corner_image_points=corner_image_points)) + + self._snapshots_unidentified = list() + if rejected_corner_points_raw: + rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) + for rejected_index in range(rejected_corner_points_px.shape[0]): + corner_image_points_px = rejected_corner_points_px[rejected_index] + corner_image_points: list[MarkerCornerImagePoint] = \ + self._corner_image_point_list_from_embedded_list( + corner_image_points_px=corner_image_points_px.tolist()) + self._snapshots_unidentified.append(MarkerSnapshot( + label=f"unknown", + corner_image_points=corner_image_points)) + + self._update_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/camera_opencv_capture_device.py b/src/implementations/camera_opencv_capture_device.py similarity index 94% rename from src/detector/implementations/camera_opencv_capture_device.py rename to src/implementations/camera_opencv_capture_device.py index 0b361f7..c694933 100644 --- a/src/detector/implementations/camera_opencv_capture_device.py +++ b/src/implementations/camera_opencv_capture_device.py @@ -1,10 +1,9 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractCamera -from ..structures import \ - CameraConfiguration, \ - CameraStatus from src.common import \ + Camera, \ + CameraConfiguration, \ + CameraStatus, \ ImageUtils, \ + MCTCameraRuntimeError, \ StatusMessageSource from src.common.structures import \ ImageResolution, \ @@ -70,7 +69,7 @@ _CAMERA_GAMMA_RANGE_MAXIMUM: Final[int] = 300 -class OpenCVCaptureDeviceCamera(AbstractCamera): +class OpenCVCaptureDeviceCamera(Camera): _capture: cv2.VideoCapture | None _capture_device_id: str | int @@ -101,19 +100,19 @@ def get_changed_timestamp(self) -> datetime.datetime: def get_image(self) -> numpy.ndarray: if self._image is None: - raise MCTDetectorRuntimeError(message="There is no captured image.") + raise MCTCameraRuntimeError(message="There is no captured image.") return self._image def get_resolution(self) -> ImageResolution: if self._capture is None: - raise MCTDetectorRuntimeError(message="The camera is not active, and resolution cannot be retrieved.") + raise MCTCameraRuntimeError(message="The camera is not active, and resolution cannot be retrieved.") return ImageResolution( x_px=int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)), y_px=int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) def get_parameters(self) -> list[KeyValueMetaAbstract]: if self._capture is None: - raise MCTDetectorRuntimeError(message="The camera is not active, and properties cannot be retrieved.") + raise MCTCameraRuntimeError(message="The camera is not active, and properties cannot be retrieved.") return_value: list[KeyValueMetaAbstract] = list() @@ -184,7 +183,7 @@ def get_type_identifier() -> str: def set_parameters(self, parameters: list[KeyValueSimpleAny]) 
-> None: if self._capture is None: - raise MCTDetectorRuntimeError(message="Capture is None.") + raise MCTCameraRuntimeError(message="Capture is None.") mismatched_keys: list[str] = list() @@ -240,7 +239,7 @@ def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: mismatched_keys.append(key_value.key) if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") def start(self) -> None: @@ -254,7 +253,7 @@ def start(self) -> None: elif os.name == "posix": self._capture = cv2.VideoCapture(self._capture_device_id, cv2.CAP_V4L2) else: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The current platform ({os.name}) is not supported.") # NOTE: The USB3 cameras bought for this project appear to require some basic parameters to be set, @@ -291,7 +290,7 @@ def update(self) -> None: message: str = "Failed to grab frame." self.add_status_message(severity="error", message=message) self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + raise MCTCameraRuntimeError(message=message) retrieved_frame: bool retrieved_frame, self._image = self._capture.retrieve() @@ -299,6 +298,6 @@ def update(self) -> None: message: str = "Failed to retrieve frame." self.add_status_message(severity="error", message=message) self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + raise MCTCameraRuntimeError(message=message) self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/detector/implementations/camera_picamera2.py b/src/implementations/camera_picamera2.py similarity index 96% rename from src/detector/implementations/camera_picamera2.py rename to src/implementations/camera_picamera2.py index 09ca6c9..92a8586 100644 --- a/src/detector/implementations/camera_picamera2.py +++ b/src/implementations/camera_picamera2.py @@ -1,9 +1,9 @@ -from ..exceptions import MCTDetectorRuntimeError -from ..interfaces import AbstractCamera -from ..structures import \ +from src.common import \ + Camera, \ CameraConfiguration, \ - CameraStatus -from src.common import StatusMessageSource + CameraStatus, \ + MCTCameraRuntimeError, \ + StatusMessageSource from src.common.structures import \ ImageResolution, \ KeyValueSimpleAbstract, \ @@ -75,7 +75,7 @@ _PICAMERA2_SHARPNESS_KEY: Final[str] = "Sharpness" -class Picamera2Camera(AbstractCamera): +class Picamera2Camera(Camera): _camera: Picamera2 _camera_configuration: Picamera2Configuration @@ -102,12 +102,12 @@ def get_changed_timestamp(self) -> datetime.datetime: def get_image(self) -> numpy.ndarray: if self._image is None: - raise MCTDetectorRuntimeError(message="There is no captured image.") + raise MCTCameraRuntimeError(message="There is no captured image.") return self._image def get_parameters(self, **_kwargs) -> list[KeyValueMetaAbstract]: if self.get_status() != CameraStatus.RUNNING: - raise MCTDetectorRuntimeError(message="The capture is not active, and properties cannot be retrieved.") + raise MCTCameraRuntimeError(message="The capture is not active, and properties cannot be retrieved.") current_controls: dict = { # Custom settings shall override default values @@ -223,7 +223,7 @@ def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: mismatched_keys.append(key_value.key) if len(mismatched_keys) > 0: - raise MCTDetectorRuntimeError( + raise MCTCameraRuntimeError( message=f"The following parameters 
could not be applied due to key mismatch: {str(mismatched_keys)}") if self.get_status() == CameraStatus.RUNNING: @@ -275,6 +275,6 @@ def update(self) -> None: message: str = "Failed to grab frame." self.add_status_message(severity="error", message=message) self.set_status(CameraStatus.FAILURE) - raise MCTDetectorRuntimeError(message=message) + raise MCTCameraRuntimeError(message=message) self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index e00621a..a84e1be 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -10,10 +10,11 @@ KeyValueSimpleAny, \ KeyValueSimpleString, \ MarkerSnapshot -from src.detector import IntrinsicCalibrator -from src.detector.implementations.marker_aruco_opencv import ArucoOpenCVMarker -from src.detector.structures import CalibratorConfiguration -from src.detector.util import KEY_CORNER_REFINEMENT_METHOD +from src.detector import \ + IntrinsicCalibrator +from src.implementations.annotator_aruco_opencv import \ + ArucoOpenCVAnnotator, \ + KEY_CORNER_REFINEMENT_METHOD from tempfile import TemporaryDirectory from typing import Final import unittest @@ -69,7 +70,7 @@ def test(self): calibration_result: IntrinsicCalibration | None = None with TemporaryDirectory() as temppath: calibrator: IntrinsicCalibrator = IntrinsicCalibrator( - configuration=CalibratorConfiguration(data_path=temppath), + configuration=IntrinsicCalibrator.Configuration(data_path=temppath), status_message_source=status_message_source) for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): for frame_id, image_filepath in image_filepaths_by_frame_id.items(): @@ -82,7 +83,7 @@ def test(self): image_resolution=IMAGE_RESOLUTION, marker_parameters=MARKER_DETECTION_PARAMETERS) - marker: ArucoOpenCVMarker = ArucoOpenCVMarker( + marker: ArucoOpenCVAnnotator = ArucoOpenCVAnnotator( configuration={"method": "aruco_opencv"}, status_message_source=status_message_source) marker.set_parameters(parameters=MARKER_DETECTION_PARAMETERS) From f2ae5538db0418b01cd95bbca74351db6ad66666 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 10 Jul 2025 18:08:59 -0400 Subject: [PATCH 07/33] WIP: Further abstraction; some consolidation --- src/board_builder/board_builder.py | 46 +- .../test/accuracy/accuracy_test.py | 19 +- .../test/accuracy/utils/generate_data.py | 20 +- .../utils/board_builder_pose_solver.py | 2 +- src/common/__init__.py | 16 +- src/common/annotator.py | 6 +- src/common/api.py | 22 +- src/common/camera.py | 4 +- src/common/exceptions.py | 8 - .../intrinsic_calibrator.py | 154 +---- src/common/mct_component.py | 24 +- src/common/structures/__init__.py | 46 +- src/common/structures/aruco.py | 136 ---- src/common/structures/detector.py | 31 - src/common/structures/image.py | 48 +- src/common/structures/linear_algebra.py | 21 +- src/common/structures/mct_component.py | 11 - src/common/structures/serialization.py | 60 +- .../{pose_solver.py => tracking.py} | 80 +-- src/common/util/image_utils.py | 6 +- src/controller/connection.py | 13 +- src/controller/mct_controller.py | 45 +- src/detector/__init__.py | 1 - src/detector/api.py | 76 +-- src/detector/detector.py | 53 +- src/detector/detector_app.py | 6 +- src/gui/panels/board_builder_panel.py | 50 +- src/gui/panels/calibrator_panel.py | 4 +- src/gui/panels/detector_panel.py | 38 +- src/gui/panels/pose_solver_panel.py | 4 +- .../specialized/calibration_image_table.py | 2 +- 
.../specialized/calibration_result_table.py | 2 +- src/implementations/annotator_aruco_opencv.py | 518 +-------------- .../camera_opencv_capture_device.py | 14 +- src/implementations/camera_picamera2.py | 16 +- src/implementations/common_aruco_opencv.py | 619 ++++++++++++++++++ .../intrinsic_charuco_opencv.py | 137 ++++ src/pose_solver/__init__.py | 14 + src/pose_solver/api.py | 24 +- src/pose_solver/pose_solver.py | 14 +- src/pose_solver/pose_solver_api.py | 38 +- src/pose_solver/structures.py | 16 +- src/slicer_connection.py | 11 - ...generate_target_definition_from_charuco.py | 4 +- src/util/measure_detector_to_reference.py | 8 +- test/test_extrinsic_calibration.py | 34 +- test/test_math_utils.py | 4 +- test/test_pose_solver.py | 139 ++-- 48 files changed, 1347 insertions(+), 1317 deletions(-) rename src/{detector => common}/intrinsic_calibrator.py (79%) delete mode 100644 src/common/structures/aruco.py delete mode 100644 src/common/structures/detector.py delete mode 100644 src/common/structures/mct_component.py rename src/common/structures/{pose_solver.py => tracking.py} (68%) create mode 100644 src/implementations/common_aruco_opencv.py create mode 100644 src/implementations/intrinsic_charuco_opencv.py create mode 100644 src/pose_solver/__init__.py diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 7a7bf34..4c21b1a 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -8,7 +8,7 @@ from .utils import BoardBuilderPoseSolver from .structures import PoseLocation, MarkerCorners -from src.common.structures import Pose, MarkerSnapshot, Matrix4x4 +from src.common.structures import Pose, Annotation, Matrix4x4 from src.common.structures import Marker, TargetBoard _HOMOGENEOUS_POINT_COORD: Final[int] = 4 @@ -145,7 +145,7 @@ def _find_matrix_input_index(self, pose_uuid, other_pose_uuid): return pose_index, other_pose_index - def _solve_pose(self, detector_data: dict[str, list[MarkerSnapshot]], timestamp: datetime.datetime): + def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: datetime.datetime): """ Given marker ids and its corner locations, find its pose """ timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: @@ -157,16 +157,18 @@ def _solve_pose(self, detector_data: dict[str, list[MarkerSnapshot]], timestamp: self._matrix_id_index += 1 for detector_name in detector_data: - for marker_snapshot in detector_data[detector_name]: - corners_list: list[list[float]] = [] # Indexed as [point][coordinate] - for corner in marker_snapshot.corner_image_points: - corners_list.append([corner.x_px, corner.y_px]) + # assumes 4 corners per marker + for i in range(0, len(detector_data[detector_name]), 4): + corners_list: list[list[float]] = [ # Indexed as [point][coordinate] + [detector_data[detector_name][i].x_px, detector_data[detector_name][i].y_px], + [detector_data[detector_name][i+1].x_px, detector_data[detector_name][i+1].y_px], + [detector_data[detector_name][i+2].x_px, detector_data[detector_name][i+2].y_px], + [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] marker_corners = MarkerCorners( detector_label=detector_name, - marker_id=int(marker_snapshot.label), + marker_id=int(detector_data[detector_name][i].label), points=corners_list, - timestamp=timestamp - ) + timestamp=timestamp) self.pose_solver.add_marker_corners([marker_corners]) target_poses = self.pose_solver.get_target_poses() @@ -201,7 +203,7 @@ def 
_write_corners_dict_to_repeatability_test_file(self, corners_dict): json.dump(data, file, indent=4) @staticmethod - def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerSnapshot]], data_description: str): + def _write_detector_data_to_recording_file(detector_data: dict[str, list[Annotation]], data_description: str): formatted_data = {} timestamp = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() for detector_name, snapshots in detector_data.items(): @@ -209,9 +211,8 @@ def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerS for snapshot in snapshots: snapshot_data = { "label": snapshot.label, - "corner_image_points": [{"x_px": pt.x_px, "y_px": pt.y_px} for pt in snapshot.corner_image_points], - "timestamp": timestamp - } + "corner_image_points": [snapshot.x_px, snapshot.y_px], + "timestamp": timestamp} formatted_data[detector_name].append(snapshot_data) current_dir = os.path.dirname(__file__) @@ -242,23 +243,24 @@ def _write_detector_data_to_recording_file(detector_data: dict[str, list[MarkerS json.dump(existing_data, f, indent=4) # public methods - def locate_reference_board(self, detector_data: dict[str, list[MarkerSnapshot]]): + def locate_reference_board(self, detector_data: dict[str, list[Annotation]]): # self._write_detector_data_to_recording_file(detector_data, "LOCATE REFERENCE DATA") if all(isinstance(v, list) and len(v) == 0 for v in detector_data.values()): return self.detector_poses = [] timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: - for marker_snapshot in detector_data[detector_name]: - corners_list: list[list[float]] = [] - for corner in marker_snapshot.corner_image_points: - corners_list.append([corner.x_px, corner.y_px]) + for i in range(0, len(detector_data[detector_name]), 4): + corners_list: list[list[float]] = [ # Indexed as [point][coordinate] + [detector_data[detector_name][i].x_px, detector_data[detector_name][i].y_px], + [detector_data[detector_name][i+1].x_px, detector_data[detector_name][i+1].y_px], + [detector_data[detector_name][i+2].x_px, detector_data[detector_name][i+2].y_px], + [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] marker_corners = MarkerCorners( detector_label=detector_name, - marker_id=int(marker_snapshot.label), + marker_id=int(detector_data[detector_name][i].label), points=corners_list, - timestamp=timestamp - ) + timestamp=timestamp) self.pose_solver.add_marker_corners([marker_corners]) new_detector_poses = self.pose_solver.get_detector_poses() @@ -276,7 +278,7 @@ def locate_reference_board(self, detector_data: dict[str, list[MarkerSnapshot]]) self.detector_poses.append(pose) self.pose_solver.set_detector_poses(self.detector_poses) - def collect_data(self, detector_data: dict[str, list[MarkerSnapshot]]): + def collect_data(self, detector_data: dict[str, list[Annotation]]): """ Collects data of relative position and is entered in matrix. 
Returns a dictionary of its corners""" # self._write_detector_data_to_recording_file(detector_data, "COLLECTION DATA") detector_data = self._filter_markers_appearing_in_multiple_detectors(detector_data) diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index be4fa60..cb496c0 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -1,8 +1,7 @@ from src.board_builder.board_builder import BoardBuilder from src.common import MathUtils from src.common.structures import \ - MarkerCornerImagePoint, \ - MarkerSnapshot, \ + Annotation, \ TargetBoard, \ Marker from .structures import AccuracyTestParameters @@ -47,12 +46,10 @@ def _add_noise_to_corners(self, data): # Apply noise for i, corner in enumerate(marker_snapshot.corner_image_points): - noisy_corner_x = corner.x_px + noise[i * 2] - noisy_corner_y = corner.y_px + noise[i * 2 + 1] - noisy_corners.append(MarkerCornerImagePoint(x_px=noisy_corner_x, y_px=noisy_corner_y)) - - noisy_marker_snapshot = MarkerSnapshot(label=marker_snapshot.label, corner_image_points=noisy_corners) - noisy_marker_snapshots.append(noisy_marker_snapshot) + noisy_marker_snapshots.append(Annotation( + label=f"{marker_snapshot.label}_{i}", + x_px=corner.x_px + noise[i * 2], + y_px=corner.y_px + noise[i * 2 + 1])) noisy_data[detector_name] = noisy_marker_snapshots return noisy_data @@ -85,7 +82,7 @@ def transform_point(point, matrix): Marker(marker_id=marker.marker_id, marker_size=marker.marker_size, points=aligned_points)) # Return the aligned TargetBoard - return TargetBoard(target_id=target_board.target_id, markers=aligned_markers) + return TargetBoard(target_id=target_board.label, markers=aligned_markers) @staticmethod def _calculate_rms_error_of_two_corner_dataset( @@ -163,7 +160,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll "generated_board_poses": snapshots_serializable, "projected_2D_points": two_dimension_collection_data_serializable, "predicted_board": { - "target_id": predicted_board.target_id, + "target_id": predicted_board.label, "markers": [ { "marker_id": marker.marker_id, @@ -172,7 +169,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll ] }, "simulated_board": { - "target_id": simulated_board.target_id, + "target_id": simulated_board.label, "markers": [ { "marker_id": marker.marker_id, diff --git a/src/board_builder/test/accuracy/utils/generate_data.py b/src/board_builder/test/accuracy/utils/generate_data.py index dcd957b..89e0cc8 100644 --- a/src/board_builder/test/accuracy/utils/generate_data.py +++ b/src/board_builder/test/accuracy/utils/generate_data.py @@ -1,11 +1,8 @@ -import math -import random import numpy as np from src.board_builder.test.accuracy.structures import AccuracyTestParameters from .projection import projection from collections import defaultdict - -from src.common.structures import MarkerSnapshot, MarkerCornerImagePoint +from src.common.structures import Annotation def find_z_axis_intersection(matrix4x4): @@ -76,7 +73,7 @@ def generate_data(board_coordinates, detector_poses, remove_markers_out_of_frame Also trims the data to remove any occluded marker """ parameters = AccuracyTestParameters() - collection_data: dict[str, list[MarkerSnapshot]] = {} + collection_data: dict[str, list[Annotation]] = {} occluded_markers = defaultdict(list, {pose.target_id: [] for pose in detector_poses}) # A list of markers that are occluded (self occlusion or 
perpendicular) # Collect data @@ -100,13 +97,12 @@ def generate_data(board_coordinates, detector_poses, remove_markers_out_of_frame marker_corners.append(pixel) if len(marker_corners) == 4: - marker_snapshot = MarkerSnapshot(label=str(marker), corner_image_points=[]) - marker_snapshot.label = str(marker) - marker_corner_image_point_list = [] - for marker_corner in marker_corners: - marker_corner_image_point = MarkerCornerImagePoint(x_px=marker_corner[0], y_px=marker_corner[1]) - marker_corner_image_point_list.append(marker_corner_image_point) - marker_snapshot_list.append(MarkerSnapshot(label=str(marker), corner_image_points=marker_corner_image_point_list)) + for corner_index, marker_corner in enumerate(marker_corners): + corner_label: str = f"{str(marker)}_{corner_index}" + marker_snapshot_list.append(Annotation( + label=corner_label, + x_px=marker_corner[0], + y_px=marker_corner[1])) collection_data[pose.target_id] = marker_snapshot_list diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index 396418d..d3819ff 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -211,7 +211,7 @@ def add_target_marker( if isinstance(target, TargetMarker) and marker_id == target.marker_id: return False target: TargetBase = TargetMarker( - target_id=str(marker_id), + label=str(marker_id), marker_id=str(marker_id), marker_size=self._board_marker_size) target_id: uuid.UUID = uuid.uuid4() diff --git a/src/common/__init__.py b/src/common/__init__.py index 1d82bfd..230fe4c 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -18,16 +18,18 @@ Camera, \ MCTCameraRuntimeError from .exceptions import \ - MCTError, \ - MCTParsingError + MCTError +from .intrinsic_calibrator import \ + IntrinsicCalibrator, \ + MCTIntrinsicCalibrationError +from .mct_component import MCTComponent +from .status_messages import \ + SeverityLabel, \ + StatusMessage, \ + StatusMessageSource from .util import \ ImageUtils, \ IOUtils, \ MathUtils, \ NetworkUtils, \ PythonUtils -from .mct_component import MCTComponent -from .status_messages import \ - SeverityLabel, \ - StatusMessage, \ - StatusMessageSource diff --git a/src/common/annotator.py b/src/common/annotator.py index a24d8dd..d770aba 100644 --- a/src/common/annotator.py +++ b/src/common/annotator.py @@ -4,7 +4,7 @@ SeverityLabel, \ StatusMessageSource from .structures import \ - MarkerSnapshot, \ + Annotation, \ KeyValueMetaAny, \ KeyValueSimpleAny import abc @@ -70,10 +70,10 @@ def set_status(self, status: Status) -> None: def get_changed_timestamp(self) -> datetime.datetime: ... @abc.abstractmethod - def get_markers_detected(self) -> list[MarkerSnapshot]: ... + def get_markers_detected(self) -> list[Annotation]: ... @abc.abstractmethod - def get_markers_rejected(self) -> list[MarkerSnapshot]: ... + def get_markers_rejected(self) -> list[Annotation]: ... @abc.abstractmethod def get_parameters(self) -> list[KeyValueMetaAny]: ... 
diff --git a/src/common/api.py b/src/common/api.py index a23c2c3..22c6477 100644 --- a/src/common/api.py +++ b/src/common/api.py @@ -1,11 +1,11 @@ from .status_messages import StatusMessage -from .structures import MCTParsable +from .structures import MCTDeserializable import abc from pydantic import BaseModel, Field, SerializeAsAny from typing import Final, Literal -class MCTRequest(BaseModel, MCTParsable, abc.ABC): +class MCTRequest(BaseModel, MCTDeserializable, abc.ABC): parsable_type: str @@ -13,7 +13,7 @@ class MCTRequestSeries(BaseModel): series: list[SerializeAsAny[MCTRequest]] = Field() -class MCTResponse(BaseModel, MCTParsable, abc.ABC): +class MCTResponse(BaseModel, MCTDeserializable, abc.ABC): parsable_type: str @@ -26,7 +26,7 @@ class EmptyResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "empty" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return EmptyResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -37,7 +37,7 @@ class ErrorResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "error" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return ErrorResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -50,7 +50,7 @@ class DequeueStatusMessagesRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DequeueStatusMessagesRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -61,7 +61,7 @@ class DequeueStatusMessagesResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DequeueStatusMessagesResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -74,7 +74,7 @@ class TimeSyncStartRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "time_sync_start_request" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return TimeSyncStartRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -85,7 +85,7 @@ class TimeSyncStopRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "time_sync_stop_request" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return TimeSyncStopRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -96,7 +96,7 @@ class TimestampGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "timestamp_get_request" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return TimestampGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -109,7 +109,7 @@ class TimestampGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "timestamp_get_response" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return TimestampGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints diff --git a/src/common/camera.py b/src/common/camera.py index f625447..ee6ac9d 100644 --- a/src/common/camera.py +++ b/src/common/camera.py @@ -4,7 +4,7 @@ SeverityLabel, \ StatusMessageSource from .structures import \ - CaptureFormat, \ + ImageFormat, \ ImageResolution, \ KeyValueSimpleAny, \ KeyValueMetaAbstract @@ -69,7 +69,7 @@ def add_status_message( def get_encoded_image( self, - image_format: CaptureFormat, + image_format: ImageFormat, requested_resolution: ImageResolution | None # None means to not alter the image dimensions ) -> str: image: numpy.ndarray = self.get_image() diff --git a/src/common/exceptions.py b/src/common/exceptions.py index 
8fc5103..97abf2f 100644 --- a/src/common/exceptions.py +++ b/src/common/exceptions.py @@ -1,11 +1,3 @@ class MCTError(Exception): def __init__(self, *args): super().__init__(*args) - - -class MCTParsingError(MCTError): - message: str - - def __init__(self, message: str, *args): - super().__init__(args) - self.message = message diff --git a/src/detector/intrinsic_calibrator.py b/src/common/intrinsic_calibrator.py similarity index 79% rename from src/detector/intrinsic_calibrator.py rename to src/common/intrinsic_calibrator.py index aa711f3..8d2492f 100644 --- a/src/detector/intrinsic_calibrator.py +++ b/src/common/intrinsic_calibrator.py @@ -1,22 +1,13 @@ -from src.common import \ - ImageUtils, \ - IOUtils, \ - MCTError, \ - StatusMessageSource -from src.common.structures import \ - CharucoBoardSpecification, \ +from .exceptions import MCTError +from .status_messages import StatusMessageSource +from .structures import \ ImageResolution, \ IntrinsicCalibration, \ - IntrinsicCalibrationFrameResult, \ - IntrinsicParameters, \ - KeyValueSimpleAny, \ - Vec3 -# TODO: -# Intrinsic Calibration could have different implementations. -# This one depends on ArUco, and it may make sense to make an abstraction. -from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator -import cv2 -import cv2.aruco + KeyValueSimpleAny +from .util import \ + ImageUtils, \ + IOUtils +import abc import datetime from enum import StrEnum import json @@ -52,7 +43,7 @@ class _ImageState(StrEnum): class _ImageMetadata(BaseModel): identifier: str = Field() - label: str = Field(default_factory=str) + label: str = Field(default_factory=str) # human-readable label timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: _ImageState = Field(default=_ImageState.SELECT) @@ -113,7 +104,7 @@ def from_dict(in_dict: dict[ImageResolution, _DataMapValue]): return _DataMap(entries=entries) -class IntrinsicCalibrator: +class IntrinsicCalibrator(abc.ABC): Configuration: type[_Configuration] = _Configuration ImageState: type[_ImageState] = _ImageState ImageMetadata: type[_ImageMetadata] = _ImageMetadata @@ -187,8 +178,7 @@ def add_image( def calculate( self, - image_resolution: ImageResolution, - marker_parameters: list[KeyValueSimpleAny] + image_resolution: ImageResolution ) -> tuple[str, IntrinsicCalibration]: """ :returns: a tuple containing a result identifier (GUID as string) and the IntrinsicCalibration structure @@ -204,23 +194,8 @@ def calculate( map_key=calibration_key, result_identifier=result_identifier) - - aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() - mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( - detection_parameters=aruco_detector_parameters, - key_value_list=marker_parameters) - if len(mismatched_keys) > 0: - raise MCTIntrinsicCalibrationError( - message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - - # TODO: ChArUco board to come from somewhere (user? 
currently assumed to be 10x8 DICT4x4_100) - charuco_spec = CharucoBoardSpecification() - # noinspection PyUnresolvedReferences - charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() - calibration_value: _DataMapValue = self._calibration_map[calibration_key] - all_charuco_corners = list() - all_charuco_ids = list() + # don't load images right away in case of memory constraints image_identifiers: list[str] = list() for image_metadata in calibration_value.image_metadata_list: if image_metadata.state != _ImageState.SELECT: @@ -234,100 +209,12 @@ def calculate( message=f"Image {image_metadata.identifier} was not found. " f"It will be omitted from the calibration.") continue - image_rgb = cv2.imread(image_filepath) - image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) - (marker_corners, marker_ids, _) = cv2.aruco.detectMarkers( - image=image_greyscale, - dictionary=charuco_spec.aruco_dictionary(), - parameters=aruco_detector_parameters) - if len(marker_corners) <= 0: - self._status_message_source.enqueue_status_message( - severity="warning", - message=f"Image {image_metadata.identifier} did not appear to contain any identifiable markers. " - f"It will be omitted from the calibration.") - continue image_identifiers.append(image_metadata.identifier) - # Note: - # Marker corners are the corners of the markers, whereas - # ChArUco corners are the corners of the chessboard. - # ChArUco calibration function works with the corners of the chessboard. - _, frame_charuco_corners, frame_charuco_ids = cv2.aruco.interpolateCornersCharuco( - markerCorners=marker_corners, - markerIds=marker_ids, - image=image_greyscale, - board=charuco_board, - ) - # Algorithm requires a minimum of 4 markers - if frame_charuco_corners is not None and len(frame_charuco_corners) >= 4: - all_charuco_corners.append(frame_charuco_corners) - all_charuco_ids.append(frame_charuco_ids) - - if len(all_charuco_corners) <= 0: - raise MCTIntrinsicCalibrationError(message="The input images did not contain visible markers.") - - # outputs to be stored in these containers - calibration_result = cv2.aruco.calibrateCameraCharucoExtended( - charucoCorners=all_charuco_corners, - charucoIds=all_charuco_ids, - board=charuco_board, - imageSize=numpy.array(charuco_spec.size_mm(), dtype="int32"), # Exception if float - cameraMatrix=numpy.identity(3, dtype='f'), - distCoeffs=numpy.zeros(5, dtype='f')) - - charuco_overall_reprojection_error = calibration_result[0] - charuco_camera_matrix = calibration_result[1] - charuco_distortion_coefficients = calibration_result[2] - charuco_rotation_vectors = calibration_result[3] - charuco_translation_vectors = calibration_result[4] - charuco_intrinsic_stdevs = calibration_result[5] - charuco_extrinsic_stdevs = calibration_result[6] - charuco_reprojection_errors = calibration_result[7] - - # TODO: Assertion on size of distortion coefficients being 5? - # Note: OpenCV documentation specifies the order of distortion coefficients - # https://docs.opencv.org/4.x/d9/d6a/group__aruco.html#ga366993d29fdddd995fba8c2e6ca811ea - # So far I have not seen calibration return a number of coefficients other than 5. - # Note too that there is an unchecked expectation that radial distortion be monotonic. 
- - intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration( - timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), + + intrinsic_calibration, image_identifiers = self._calculate_implementation( image_resolution=image_resolution, - reprojection_error=charuco_overall_reprojection_error, - calibrated_values=IntrinsicParameters( - focal_length_x_px=charuco_camera_matrix[0, 0], - focal_length_y_px=charuco_camera_matrix[1, 1], - optical_center_x_px=charuco_camera_matrix[0, 2], - optical_center_y_px=charuco_camera_matrix[1, 2], - radial_distortion_coefficients=[ - charuco_distortion_coefficients[0, 0], - charuco_distortion_coefficients[1, 0], - charuco_distortion_coefficients[4, 0]], - tangential_distortion_coefficients=[ - charuco_distortion_coefficients[2, 0], - charuco_distortion_coefficients[3, 0]]), - calibrated_stdevs=[value[0] for value in charuco_intrinsic_stdevs], - marker_parameters=marker_parameters, - frame_results=[ - IntrinsicCalibrationFrameResult( - image_identifier=image_identifiers[i], - translation=Vec3( - x=charuco_translation_vectors[i][0, 0], - y=charuco_translation_vectors[i][1, 0], - z=charuco_translation_vectors[i][2, 0]), - rotation=Vec3( - x=charuco_rotation_vectors[i][0, 0], - y=charuco_rotation_vectors[i][1, 0], - z=charuco_rotation_vectors[i][2, 0]), - translation_stdev=Vec3( - x=charuco_extrinsic_stdevs[i*6 + 3, 0], - y=charuco_extrinsic_stdevs[i*6 + 4, 0], - z=charuco_extrinsic_stdevs[i*6 + 5, 0]), - rotation_stdev=Vec3( - x=charuco_extrinsic_stdevs[i*6 + 0, 0], - y=charuco_extrinsic_stdevs[i*6 + 1, 0], - z=charuco_extrinsic_stdevs[i*6 + 2, 0]), - reprojection_error=charuco_reprojection_errors[i, 0]) - for i in range(0, len(charuco_reprojection_errors))]) + image_identifiers=image_identifiers) + IOUtils.json_write( filepath=result_filepath, json_dict=intrinsic_calibration.model_dump(), @@ -336,7 +223,6 @@ def calculate( message=msg), on_error_for_dev=logger.error, ignore_none=True) - result_metadata: IntrinsicCalibrator.ResultMetadata = IntrinsicCalibrator.ResultMetadata( identifier=result_identifier, image_identifiers=image_identifiers) @@ -346,6 +232,14 @@ def calculate( self.save() return result_identifier, intrinsic_calibration + @abc.abstractmethod + def _calculate_implementation( + self, + image_resolution: ImageResolution, + image_identifiers: list[str] + ) -> tuple[IntrinsicCalibration, list[str]]: # image_identifiers that were actually used in calibration + pass + def _delete_if_exists(self, filepath: str): try: os.remove(filepath) diff --git a/src/common/mct_component.py b/src/common/mct_component.py index deef008..44d566d 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -11,12 +11,13 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from .exceptions import MCTParsingError from .status_messages import \ SeverityLabel, \ StatusMessage, \ StatusMessageSource -from .structures import MCTParsable +from .structures import \ + MCTDeserializable, \ + MCTSerializationError from .util import \ PythonUtils import abc @@ -29,7 +30,7 @@ logger = logging.getLogger(__name__) -ParsableDynamicSingle = TypeVar('ParsableDynamicSingle', bound=MCTParsable) +SerializableSingle = TypeVar('SerializableSingle', bound=MCTDeserializable) class MCTComponent(abc.ABC): @@ -69,13 +70,13 @@ def add_status_subscriber( def parse_dynamic_series_list( self, parsable_series_dict: dict, - supported_types: list[type[ParsableDynamicSingle]] - ) -> list[ParsableDynamicSingle]: + supported_types: 
list[type[SerializableSingle]] + ) -> list[SerializableSingle]: try: - return MCTParsable.parse_dynamic_series_list( - parsable_series_dict=parsable_series_dict, + return MCTDeserializable.deserialize_series_list( + series_dict=parsable_series_dict, supported_types=supported_types) - except MCTParsingError as e: + except MCTSerializationError as e: self.add_status_message( severity="error", message=e.message) @@ -94,6 +95,11 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: return DequeueStatusMessagesResponse( status_messages=status_messages) + @staticmethod + @abc.abstractmethod + def get_role_label(): + pass + def get_status_message_source(self): return self._status_message_source @@ -143,7 +149,7 @@ async def websocket_handler(self, websocket: WebSocket) -> None: request_series_list: list[MCTRequest] = self.parse_dynamic_series_list( parsable_series_dict=request_series_dict, supported_types=list(self.supported_request_types().keys())) - except MCTParsingError as e: + except MCTSerializationError as e: logger.exception(str(e)) await websocket.send_json(MCTResponseSeries().model_dump()) continue diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index 684ab26..4f759b8 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -1,40 +1,17 @@ -from .aruco import \ - CharucoBoardSpecification, \ - CornerRefinementMethod, \ - CORNER_REFINEMENT_METHOD_NONE, \ - CORNER_REFINEMENT_METHOD_SUBPIX, \ - CORNER_REFINEMENT_METHOD_CONTOUR,\ - CORNER_REFINEMENT_METHOD_APRILTAG, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ - MarkerDefinition -from .detector import \ - CaptureFormat, \ - DetectorFrame, \ - MarkerCornerImagePoint, \ - MarkerSnapshot from .image import \ + Annotation, \ + ImageFormat, \ ImageResolution, \ IntrinsicCalibration, \ - IntrinsicCalibrationFrameResult, \ - IntrinsicParameters + IntrinsicParameters, \ + RELATION_CHARACTER from .linear_algebra import \ IterativeClosestPointParameters, \ + Landmark, \ Matrix4x4, \ Pose, \ Ray, \ - Vec3 -from .mct_component import \ - ComponentRoleLabel, \ - COMPONENT_ROLE_LABEL_DETECTOR, \ - COMPONENT_ROLE_LABEL_POSE_SOLVER -from .pose_solver import \ - Marker, \ - PoseSolverFrame, \ - PoseSolverStatus, \ - TargetBase, \ - TargetBoard, \ - TargetMarker + Target from .serialization import \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ @@ -49,4 +26,13 @@ KeyValueMetaFloat, \ KeyValueMetaInt, \ key_value_meta_to_simple, \ - MCTParsable + MCTSerializationError, \ + MCTDeserializable +from .tracking import \ + Annotation, \ + DetectorFrame, \ + Marker, \ + PoseSolverFrame, \ + TargetBase, \ + TargetBoard, \ + TargetMarker diff --git a/src/common/structures/aruco.py b/src/common/structures/aruco.py deleted file mode 100644 index 9059334..0000000 --- a/src/common/structures/aruco.py +++ /dev/null @@ -1,136 +0,0 @@ -import base64 -import cv2 -import cv2.aruco -import numpy -from pydantic import BaseModel, Field -from typing import Any, Final, Literal, Tuple - - -CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] -CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' -CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' -CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' -CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' - - -CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { - "NONE": cv2.aruco.CORNER_REFINE_NONE, - "SUBPIX": 
cv2.aruco.CORNER_REFINE_SUBPIX, - "CONTOUR": cv2.aruco.CORNER_REFINE_CONTOUR, - "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} - - -CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, CornerRefinementMethod] = { - cv2.aruco.CORNER_REFINE_NONE: "NONE", - cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", - cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", - cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} - - -class CharucoBoardSpecification(BaseModel): - dictionary_name: str = Field(default="DICT_4X4_100") - square_count_x: int = Field(default=8) - square_count_y: int = Field(default=10) - square_size_px: int = Field(default=800) - marker_size_px: int = Field(default=400) - px_per_mm: float = Field(default=40) - - def aruco_dictionary(self) -> Any: # type cv2.aruco.Dictionary - if self.dictionary_name != "DICT_4X4_100": - raise NotImplementedError("Only DICT_4X4_100 is currently implemented") - aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - return aruco_dictionary - - def size_px(self) -> Tuple[float, float]: - board_size_x_px = self.square_count_x * self.square_size_px - board_size_y_px = self.square_count_y * self.square_size_px - return board_size_x_px, board_size_y_px - - def size_mm(self) -> Tuple[float, float]: - board_size_x_mm = self.square_count_x * self.square_size_px / self.px_per_mm - board_size_y_mm = self.square_count_y * self.square_size_px / self.px_per_mm - return board_size_x_mm, board_size_y_mm - - def create_board(self) -> Any: # type cv2.aruco.CharucoBoard - charuco_board = cv2.aruco.CharucoBoard( - size=(self.square_count_x, self.square_count_y), - squareLength=self.square_size_px, - markerLength=self.marker_size_px, - dictionary=self.aruco_dictionary()) - return charuco_board - - def get_marker_center_points(self) -> list[list[float]]: - """ - Note that the coordinates assume (based on portrait orientation): - origin: at bottom-left of board - x-axis: goes right - y-axis: goes up the page - z-axis: comes out of the image and toward the viewer - """ - points = [] - for y in range(self.square_count_y): - for x in range(self.square_count_x): - if (x + y) % 2 == 1: # Only add the points for the white squares - point_x = (x + 0.5) * self.square_size_px / self.px_per_mm - point_y = (self.square_count_y - y - 0.5) * self.square_size_px / self.px_per_mm - points.append([point_x, point_y, 0.0]) - return points - - def get_marker_corner_points(self) -> list[list[float]]: - """ - Note that the coordinates assume the same axes as get_marker_center_points, - but the origin is in the center of the board, not the bottom-left corner. 
- """ - points = [] - marker_size_mm: float = self.marker_size_px / self.px_per_mm - square_size_mm: float = self.square_size_px / self.px_per_mm - for y_sq in range(self.square_count_y): - for x_sq in range(self.square_count_x): - if (x_sq + y_sq) % 2 == 1: # Only add the points for the white squares - x_sq_centered: float = x_sq - (self.square_count_x / 2.0) - y_sq_centered: float = y_sq - (self.square_count_y / 2.0) - for corner_index in range(0, 4): - x_mm: float = (x_sq_centered + 0.5) * square_size_mm - if corner_index == 0 or corner_index == 3: - x_mm -= (marker_size_mm / 2.0) - else: - x_mm += (marker_size_mm / 2.0) - y_mm: float = (-(y_sq_centered + 0.5)) * square_size_mm - if corner_index == 0 or corner_index == 1: - y_mm += (marker_size_mm / 2.0) - else: - y_mm -= (marker_size_mm / 2.0) - z_mm: float = 0.0 - points.append([x_mm, y_mm, z_mm]) - return points - - def get_marker_ids(self) -> list[int]: - num_markers = self.square_count_x * self.square_count_y // 2 - return list(range(num_markers)) - - -class MarkerDefinition(BaseModel): - # TODO: This is unused at the time of writing 2025-07-09, deletion should be assessed - - label: str = Field() - representation_single_base64: str = Field() # representation from a single rotation only - - def representation_all_base64(self): - """ - OpenCV ArUco expects to receive all possible rotations of a marker. We generate these programmatically. - """ - representation_single_bytes: bytes = base64.b64decode(self.representation_single_base64) - representation_single_list: list[bool] = list(representation_single_bytes) - representation_single_matrix: numpy.ndarray = numpy.asarray( - a=representation_single_list, - dtype=bool) - marker_side_length_bits: int = int(numpy.sqrt(len(representation_single_list))) - representation_single_matrix = numpy.reshape( - a=representation_single_matrix, - newshape=(marker_side_length_bits, marker_side_length_bits)) - representation_all_list: list[bool] = list(representation_single_matrix.flatten()) - for i in range(3): - representation_single_matrix = numpy.rot90(representation_single_matrix) - representation_all_list += list(representation_single_matrix.flatten()) - representation_all_bytes: bytes = bytes(representation_all_list) - return base64.b64encode(representation_all_bytes) diff --git a/src/common/structures/detector.py b/src/common/structures/detector.py deleted file mode 100644 index e19ae7b..0000000 --- a/src/common/structures/detector.py +++ /dev/null @@ -1,31 +0,0 @@ -from .image import ImageResolution -import datetime -from enum import StrEnum -from pydantic import BaseModel, Field -from typing import Final - - -class CaptureFormat(StrEnum): - FORMAT_PNG: Final[str] = ".png" - FORMAT_JPG: Final[str] = ".jpg" - - -class MarkerCornerImagePoint(BaseModel): - # TODO: Some types of markers may not refer to "corners" per se, so it may be worth renaming this class - x_px: float = Field() - y_px: float = Field() - - -class MarkerSnapshot(BaseModel): - label: str = Field() # Empty indicates that something was detected but not identified - corner_image_points: list[MarkerCornerImagePoint] = Field() - - -class DetectorFrame(BaseModel): - detected_marker_snapshots: list[MarkerSnapshot] | None = Field() - rejected_marker_snapshots: list[MarkerSnapshot] | None = Field() - timestamp_utc_iso8601: str = Field() - image_resolution: ImageResolution = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/structures/image.py 
b/src/common/structures/image.py index 86df23f..2b9091b 100644 --- a/src/common/structures/image.py +++ b/src/common/structures/image.py @@ -1,7 +1,35 @@ -from .serialization import KeyValueSimpleAny -from .linear_algebra import Vec3 +from enum import StrEnum import math -from pydantic import BaseModel, Field, SerializeAsAny +from pydantic import BaseModel, Field +from typing import ClassVar, Final + + +RELATION_CHARACTER: Final[str] = "$" + + +class Annotation(BaseModel): + """ + A distinct point as detected on a detector image. + """ + + UNIDENTIFIED_LABEL: ClassVar[str] = str() + + label: str = Field() # Empty indicates that something was detected but not identified + x_px: float = Field() + y_px: float = Field() + + def base_label(self): + """ + Part of the label before the RELATED_PREFIX. + """ + if RELATION_CHARACTER not in self.label: + return self.label + return self.label[0:self.label.index(RELATION_CHARACTER)] + + +class ImageFormat(StrEnum): + FORMAT_PNG: Final[str] = ".png" + FORMAT_JPG: Final[str] = ".jpg" class ImageResolution(BaseModel): @@ -113,20 +141,8 @@ def generate_zero_parameters( tangential_distortion_coefficients=[0.0, 0.0]) -class IntrinsicCalibrationFrameResult(BaseModel): - image_identifier: str = Field() - translation: Vec3 = Field() - rotation: Vec3 = Field() - translation_stdev: Vec3 = Field() - rotation_stdev: Vec3 = Field() - reprojection_error: float = Field() - - class IntrinsicCalibration(BaseModel): timestamp_utc: str = Field() image_resolution: ImageResolution = Field() - reprojection_error: float = Field() calibrated_values: IntrinsicParameters = Field() - calibrated_stdevs: list[float] = Field() - marker_parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() - frame_results: list[IntrinsicCalibrationFrameResult] = Field(default=list()) + supplemental_data: dict = Field() diff --git a/src/common/structures/linear_algebra.py b/src/common/structures/linear_algebra.py index d9a1f7b..bd48e5b 100644 --- a/src/common/structures/linear_algebra.py +++ b/src/common/structures/linear_algebra.py @@ -21,6 +21,17 @@ class IterativeClosestPointParameters(BaseModel): termination_rms_point_distance: float = Field() # root-mean-square +class Landmark(BaseModel): + """ + A distinct point in 3D space. + Coordinates are in the unit of the user's choosing. + """ + label: str = Field() + x: float = Field() + y: float = Field() + z: float = Field() + + class Matrix4x4(BaseModel): @staticmethod @@ -102,7 +113,6 @@ class Pose(BaseModel): solver_timestamp_utc_iso8601: str = Field() -# TODO: Turn this into a pydantic class class Ray: source_point: list[float] direction: list[float] @@ -120,11 +130,8 @@ def __init__( self.direction = direction -class Vec3(BaseModel): +class Target(BaseModel): """ - Simply a container for x, y, and z coordinates. - TODO: This has limited usage, it may be best to delete it + A trackable object. 
""" - x: float = Field() - y: float = Field() - z: float = Field() + landmarks: list[Landmark] diff --git a/src/common/structures/mct_component.py b/src/common/structures/mct_component.py deleted file mode 100644 index e81a6fc..0000000 --- a/src/common/structures/mct_component.py +++ /dev/null @@ -1,11 +0,0 @@ -from enum import StrEnum -from typing import Final - - -COMPONENT_ROLE_LABEL_DETECTOR: Final[str] = "detector" -COMPONENT_ROLE_LABEL_POSE_SOLVER: Final[str] = "pose_solver" -class ComponentRoleLabel(StrEnum): - DETECTOR: Final[str] = COMPONENT_ROLE_LABEL_DETECTOR - POSE_SOLVER: Final[str] = COMPONENT_ROLE_LABEL_POSE_SOLVER - - diff --git a/src/common/structures/serialization.py b/src/common/structures/serialization.py index 4c199b2..ee873e0 100644 --- a/src/common/structures/serialization.py +++ b/src/common/structures/serialization.py @@ -1,4 +1,4 @@ -from src.common.exceptions import MCTParsingError +from src.common.exceptions import MCTError import abc from pydantic import BaseModel, Field, ValidationError from typing import Final, Literal, TypeVar, Union @@ -136,53 +136,61 @@ def key_value_meta_to_simple( return [key_value_meta.to_simple() for key_value_meta in key_value_meta_list] -ParsableDynamic = TypeVar('ParsableDynamic', bound='MCTParsable') +DeserializableT = TypeVar('DeserializableT', bound='MCTParsable') -class MCTParsable(abc.ABC): +class MCTSerializationError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class MCTDeserializable(abc.ABC): @staticmethod @abc.abstractmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: pass @staticmethod - def parse_dynamic_series_list( - parsable_series_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> list[ParsableDynamic]: - if "series" not in parsable_series_dict or not isinstance(parsable_series_dict["series"], list): + def deserialize_series_list( + series_dict: dict, + supported_types: list[type[DeserializableT]] + ) -> list[DeserializableT]: + if "series" not in series_dict or not isinstance(series_dict["series"], list): message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." - raise MCTParsingError(message) + raise MCTSerializationError(message) - output_series: list[ParsableDynamic] = list() - for parsable_dict in parsable_series_dict["series"]: + output_series: list[DeserializableT] = list() + for parsable_dict in series_dict["series"]: if not isinstance(parsable_dict, dict): message: str = "series contained a non-dict element. Input is improperly formatted." - raise MCTParsingError(message) - output_series.append(MCTParsable.parse_dynamic_single( - parsable_dict=parsable_dict, + raise MCTSerializationError(message) + output_series.append(MCTDeserializable.deserialize_single( + single_dict=parsable_dict, supported_types=supported_types)) return output_series @staticmethod - def parse_dynamic_single( - parsable_dict: dict, - supported_types: list[type[ParsableDynamic]] - ) -> ParsableDynamic: - if "parsable_type" not in parsable_dict or not isinstance(parsable_dict["parsable_type"], str): + def deserialize_single( + single_dict: dict, + supported_types: list[type[DeserializableT]] + ) -> DeserializableT: + if "parsable_type" not in single_dict or not isinstance(single_dict["parsable_type"], str): message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." 
- raise MCTParsingError(message) from None + raise MCTSerializationError(message) from None for supported_type in supported_types: - if parsable_dict["parsable_type"] == supported_type.parsable_type_identifier(): - request: ParsableDynamic + if single_dict["parsable_type"] == supported_type.parsable_type_identifier(): + request: DeserializableT try: - request = supported_type(**parsable_dict) + request = supported_type(**single_dict) except ValidationError as e: - raise MCTParsingError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None + raise MCTSerializationError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None return request message: str = "parsable_type did not match any expected value. Input is improperly formatted." - raise MCTParsingError(message) + raise MCTSerializationError(message) diff --git a/src/common/structures/pose_solver.py b/src/common/structures/tracking.py similarity index 68% rename from src/common/structures/pose_solver.py rename to src/common/structures/tracking.py index 79471a6..f7c33c7 100644 --- a/src/common/structures/pose_solver.py +++ b/src/common/structures/tracking.py @@ -1,10 +1,41 @@ +from .image import Annotation, ImageResolution from .linear_algebra import Pose import abc import datetime -from enum import IntEnum import numpy from pydantic import BaseModel, Field, PrivateAttr -from typing import Final + + +class DetectorFrame(BaseModel): + annotations: list[Annotation] = Field(default_factory=list) + timestamp_utc_iso8601: str = Field() + image_resolution: ImageResolution = Field() + + @property + def annotations_identified(self): + return [annotation for annotation in self.annotations if annotation.label != Annotation.UNIDENTIFIED_LABEL] + + @property + def annotations_unidentified(self): + return [annotation for annotation in self.annotations if annotation.label == Annotation.UNIDENTIFIED_LABEL] + + @property + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class PoseSolverFrame(BaseModel): + detector_poses: list[Pose] | None = Field() + target_poses: list[Pose] | None = Field() + timestamp_utc_iso8601: str = Field() + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +# -------------------------------------------------------------------------------- +# Everything below should be assessed for either migration or deletion +# -------------------------------------------------------------------------------- class Marker(BaseModel): @@ -12,21 +43,6 @@ class Marker(BaseModel): marker_size: float | None = Field(default=None) points: list[list[float]] | None = Field(default=None) - # TODO: During validation, make sure either marker_size or points is defined, but not both. - - def get_marker_size(self) -> float: - if self.marker_size is None: - if self.points is None or len(self.points) < 2: - raise RuntimeError("TargetMarker defined with neither marker_size nor enough points.") - marker_size_sum: float = 0.0 - for point_index in range(0, len(self.points)): - point_a: numpy.ndarray = numpy.asarray(self.points[point_index]) - point_b: numpy.ndarray = numpy.asarray(self.points[point_index-1]) - vector: numpy.ndarray = point_a - point_b - marker_size_sum += numpy.linalg.norm(vector) - self.marker_size = marker_size_sum / len(self.points) - return self.marker_size - def get_points_internal(self) -> list[list[float]]: # Use the TargetBase.get_points() instead. 
if self.points is None: @@ -42,7 +58,7 @@ def get_points_internal(self) -> list[list[float]]: class TargetBase(BaseModel, abc.ABC): - target_id: str = Field() + label: str = Field() @abc.abstractmethod def get_marker_ids(self) -> list[str]: ... @@ -63,7 +79,7 @@ def get_points(self) -> list[list[float]]: def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: if marker_id != self.marker_id: - raise IndexError(f"marker_id {marker_id} is not in target {self.target_id}") + raise IndexError(f"marker_id {marker_id} is not in target {self.label}") return self.get_points_internal() @@ -90,29 +106,5 @@ def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: for marker in self.markers: self._marker_dict[marker.marker_id] = marker if marker_id not in self._marker_dict: - raise IndexError(f"marker_id {marker_id} is not in target {self.target_id}") + raise IndexError(f"marker_id {marker_id} is not in target {self.label}") return self._marker_dict[marker_id].points - - -class PoseSolverStatus: - - class Solve(IntEnum): - STOPPED: Final[int] = 0 - RUNNING: Final[int] = 1 - FAILURE: Final[int] = 2 - - solve_status: Solve - solve_errors: list[str] - - def __init__(self): - self.solve_status = PoseSolverStatus.Solve.STOPPED - self.solve_errors = list() - - -class PoseSolverFrame(BaseModel): - detector_poses: list[Pose] | None = Field() - target_poses: list[Pose] | None = Field() - timestamp_utc_iso8601: str = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/util/image_utils.py b/src/common/util/image_utils.py index b4e39f1..ed13c88 100644 --- a/src/common/util/image_utils.py +++ b/src/common/util/image_utils.py @@ -1,5 +1,5 @@ from src.common.structures import \ - CaptureFormat, \ + ImageFormat, \ ImageResolution import base64 import cv2 @@ -77,7 +77,7 @@ def image_resize_to_fit( @staticmethod def image_to_base64( image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", + image_format: ImageFormat = ".png", ) -> str: """ :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) @@ -93,7 +93,7 @@ def image_to_base64( @staticmethod def image_to_bytes( image_data: numpy.ndarray, - image_format: CaptureFormat = ".png", + image_format: ImageFormat = ".png", ) -> bytes: """ :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) diff --git a/src/controller/connection.py b/src/controller/connection.py index 95a5de5..b080411 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -10,13 +10,12 @@ StatusMessage, \ TimestampGetResponse from src.common.structures import \ - ComponentRoleLabel, \ DetectorFrame, \ ImageResolution, \ IntrinsicParameters, \ KeyValueSimpleAny, \ Matrix4x4, \ - MCTParsable, \ + MCTDeserializable, \ Pose, \ PoseSolverFrame, \ TargetBase @@ -89,7 +88,7 @@ class ComponentAddress: def __init__( self, label: str, - role: ComponentRoleLabel, + role: str, ip_address: IPv4Address, port: int ): @@ -146,7 +145,7 @@ class Report: Human-readable information that shall be shown to a user about a connection. 
""" label: str - role: ComponentRoleLabel + role: str ip_address: str port: int status: str @@ -154,7 +153,7 @@ class Report: def __init__( self, label: str, - role: ComponentRoleLabel, + role: str, ip_address: str, port: int, status: str @@ -333,8 +332,8 @@ def _send_recv(self) -> SendRecvResult: def _response_series_converter( response_series_dict: dict ) -> MCTResponseSeries: - series_list: list[MCTResponse] = MCTParsable.parse_dynamic_series_list( - parsable_series_dict=response_series_dict, + series_list: list[MCTResponse] = MCTDeserializable.deserialize_series_list( + series_dict=response_series_dict, supported_types=self.supported_response_types()) return MCTResponseSeries(series=series_list) diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index f59e9e0..454b139 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -21,20 +21,19 @@ TimeSyncStartRequest, \ TimeSyncStopRequest from src.common.structures import \ - ComponentRoleLabel, \ - COMPONENT_ROLE_LABEL_DETECTOR, \ - COMPONENT_ROLE_LABEL_POSE_SOLVER, \ DetectorFrame, \ IntrinsicParameters, \ PoseSolverFrame -from src.detector.api import \ +from src.detector import \ CalibrationResultGetActiveRequest, \ CalibrationResultGetActiveResponse, \ CameraResolutionGetRequest, \ CameraResolutionGetResponse, \ + Detector, \ DetectorFrameGetRequest, \ DetectorFrameGetResponse -from src.pose_solver.api import \ +from src.pose_solver import \ + PoseSolverAPI, \ PoseSolverAddDetectorFrameRequest, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ @@ -49,13 +48,17 @@ import numpy import os from pydantic import ValidationError -from typing import Callable, Final, get_args, TypeVar +from typing import Callable, Final, TypeVar import uuid logger = logging.getLogger(__name__) ConnectionType = TypeVar('ConnectionType', bound=Connection) +_ROLE_LABEL: Final[str] = "controller" +_SUPPORTED_ROLES: Final[list[str]] = [ + Detector.get_role_label(), + PoseSolverAPI.get_role_label()] _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: Final[int] = 5 @@ -160,11 +163,11 @@ def add_connection( label = component_address.label if label in self._connections: raise RuntimeError(f"Connection associated with {label} already exists.") - if component_address.role == COMPONENT_ROLE_LABEL_DETECTOR: + if component_address.role == Detector.get_role_label(): return_value: DetectorConnection = DetectorConnection(component_address=component_address) self._connections[label] = return_value return return_value - elif component_address.role == COMPONENT_ROLE_LABEL_POSE_SOLVER: + elif component_address.role == PoseSolverAPI.get_role_label(): return_value: PoseSolverConnection = PoseSolverConnection(component_address=component_address) self._connections[label] = return_value return return_value @@ -271,13 +274,13 @@ def get_active_detector_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=COMPONENT_ROLE_LABEL_DETECTOR, active=True) + return self.get_component_labels(role=Detector.get_role_label(), active=True) def get_active_pose_solver_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=COMPONENT_ROLE_LABEL_POSE_SOLVER, active=True) + return self.get_component_labels(role=PoseSolverAPI.get_role_label(), active=True) def get_component_labels( self, @@ -289,7 +292,7 @@ def get_component_labels( None provided to `role` or `active` is treated as a wildcard (i.e. not filtered on that criteria). 
""" if role is not None: - if role not in ComponentRoleLabel: + if role not in _SUPPORTED_ROLES: raise ValueError(f"role must be among the valid values for ComponentRoleLabel") return_value: list[str] = list() for connection_label, connection in self._connections.items(): @@ -363,6 +366,10 @@ def get_live_pose_solver_frame( target_poses=pose_solver_connection.target_poses, timestamp_utc_iso8601=pose_solver_connection.poses_timestamp.isoformat()) + @staticmethod + def get_role_label(): + return _ROLE_LABEL + def get_status(self) -> Status: return self._status @@ -433,7 +440,7 @@ def handle_response_detector_frame_get( return frame: DetectorFrame = response.frame adjusted_timestamp_utc: datetime.datetime = \ - frame.timestamp_utc() - datetime.timedelta(seconds=detector_connection.controller_offset_seconds) + frame.timestamp_utc - datetime.timedelta(seconds=detector_connection.controller_offset_seconds) frame.timestamp_utc_iso8601 = adjusted_timestamp_utc.isoformat() detector_connection.latest_frame = frame @@ -576,9 +583,9 @@ def recording_stop(self): connection_type=Connection) report = connection.get_report() # Do not record if specified - if report.role == COMPONENT_ROLE_LABEL_DETECTOR and not self._recording_detector: + if report.role == Detector.get_role_label() and not self._recording_detector: continue - if report.role == COMPONENT_ROLE_LABEL_POSE_SOLVER and not self._recording_pose_solver: + if report.role == PoseSolverAPI.get_role_label() and not self._recording_pose_solver: continue if self._recording_save_path is not None: @@ -683,7 +690,7 @@ def start_up( raise RuntimeError("Cannot start up if controller isn't first stopped.") for connection in self._connections.values(): if mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == COMPONENT_ROLE_LABEL_POSE_SOLVER: + connection.get_role() == PoseSolverAPI.get_role_label(): continue connection.start_up() @@ -721,7 +728,7 @@ def update( all_connected: bool = True for connection in connections: if self._startup_mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == COMPONENT_ROLE_LABEL_POSE_SOLVER: + connection.get_role() == PoseSolverAPI.get_role_label(): continue if not connection.is_start_up_finished(): all_connected = False @@ -777,7 +784,7 @@ def update( detector_label=detector_label) if current_detector_frame is None: continue - current_detector_frame_timestamp: datetime.datetime = current_detector_frame.timestamp_utc() + current_detector_frame_timestamp: datetime.datetime = current_detector_frame.timestamp_utc current_is_new: bool = False if detector_label in pose_solver_connection.detector_timestamps: old_detector_frame_timestamp = \ @@ -789,9 +796,9 @@ def update( if current_is_new: pose_solver_connection.detector_timestamps[detector_label] = \ current_detector_frame_timestamp - adjusted_detector_frame: DetectorFrame = current_detector_frame.copy() + adjusted_detector_frame: DetectorFrame = current_detector_frame.model_copy() adjusted_timestamp_utc: datetime.datetime = \ - current_detector_frame.timestamp_utc() + \ + current_detector_frame.timestamp_utc + \ datetime.timedelta(seconds=pose_solver_connection.controller_offset_seconds) adjusted_detector_frame.timestamp_utc_iso8601 = adjusted_timestamp_utc.isoformat() marker_request: PoseSolverAddDetectorFrameRequest = PoseSolverAddDetectorFrameRequest( diff --git a/src/detector/__init__.py b/src/detector/__init__.py index de7443b..67a4432 100644 --- a/src/detector/__init__.py +++ b/src/detector/__init__.py @@ -33,7 +33,6 @@ AnnotatorParametersGetRequest, 
\ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest -from .intrinsic_calibrator import IntrinsicCalibrator from .detector import \ Detector, \ DetectorConfiguration diff --git a/src/detector/api.py b/src/detector/api.py index 1b4190d..4c35485 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -1,14 +1,14 @@ from src.common import \ + IntrinsicCalibrator, \ MCTRequest, \ MCTResponse from src.common.structures import \ - CaptureFormat, \ + ImageFormat, \ DetectorFrame, \ IntrinsicCalibration, \ ImageResolution, \ KeyValueMetaAny, \ KeyValueSimpleAny -from .intrinsic_calibrator import IntrinsicCalibrator from pydantic import Field, SerializeAsAny from typing import Final, Literal @@ -17,7 +17,7 @@ class AnnotatorParametersGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return AnnotatorParametersGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -28,7 +28,7 @@ class AnnotatorParametersGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return AnnotatorParametersGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -41,7 +41,7 @@ class AnnotatorParametersSetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_set" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return AnnotatorParametersSetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -54,7 +54,7 @@ class CalibrationCalculateRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationCalculateRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -67,7 +67,7 @@ class CalibrationCalculateResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationCalculateResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -81,7 +81,7 @@ class CalibrationDeleteStagedRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_delete_staged" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationDeleteStagedRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -92,7 +92,7 @@ class CalibrationImageAddRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageAddRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -103,7 +103,7 @@ class CalibrationImageAddResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageAddResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -116,7 +116,7 @@ class CalibrationImageGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -129,7 +129,7 @@ class CalibrationImageGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" @staticmethod - def 
parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -142,7 +142,7 @@ class CalibrationImageMetadataListRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageMetadataListRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -155,7 +155,7 @@ class CalibrationImageMetadataListResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageMetadataListResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -168,7 +168,7 @@ class CalibrationImageMetadataUpdateRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_update" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -183,7 +183,7 @@ class CalibrationResolutionListRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResolutionListRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -194,7 +194,7 @@ class CalibrationResolutionListResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResolutionListResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -207,7 +207,7 @@ class CalibrationResultGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -220,7 +220,7 @@ class CalibrationResultGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -233,7 +233,7 @@ class CalibrationResultGetActiveRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultGetActiveRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -244,7 +244,7 @@ class CalibrationResultGetActiveResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultGetActiveResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -257,7 +257,7 @@ class CalibrationResultMetadataListRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultMetadataListRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -270,7 +270,7 @@ class CalibrationResultMetadataListResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" @staticmethod - def parsable_type_identifier() -> str: + def 
type_identifier() -> str: return CalibrationResultMetadataListResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -283,7 +283,7 @@ class CalibrationResultMetadataUpdateRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_update" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -298,13 +298,13 @@ class CameraImageGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraImageGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - format: CaptureFormat = Field() + format: ImageFormat = Field() requested_resolution: ImageResolution | None = Field(default=None) @@ -312,13 +312,13 @@ class CameraImageGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraImageGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - format: CaptureFormat = Field() + format: ImageFormat = Field() image_base64: str = Field() @@ -326,7 +326,7 @@ class CameraParametersGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraParametersGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -337,7 +337,7 @@ class CameraParametersGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraParametersGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -350,7 +350,7 @@ class CameraParametersSetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraParametersSetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -363,7 +363,7 @@ class CameraParametersSetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraParametersSetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -376,7 +376,7 @@ class CameraResolutionGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraResolutionGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -387,7 +387,7 @@ class CameraResolutionGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return CameraResolutionGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -400,7 +400,7 @@ class DetectorFrameGetRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DetectorFrameGetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -414,7 +414,7 @@ class DetectorFrameGetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = 
"detector_frame_get" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DetectorFrameGetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -427,7 +427,7 @@ class DetectorStartRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_start" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DetectorStartRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -438,7 +438,7 @@ class DetectorStopRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "detector_stop" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return DetectorStopRequest._TYPE_IDENTIFIER # noinspection PyTypeHints diff --git a/src/detector/detector.py b/src/detector/detector.py index ef4ddc6..4080514 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -33,14 +33,13 @@ DetectorFrameGetResponse, \ DetectorStartRequest, \ DetectorStopRequest -from .intrinsic_calibrator import \ - IntrinsicCalibrator, \ - MCTIntrinsicCalibrationError from src.common import \ Annotator, \ Camera, \ EmptyResponse, \ ErrorResponse, \ + IntrinsicCalibrator, \ + MCTIntrinsicCalibrationError, \ MCTCameraRuntimeError, \ MCTComponent, \ MCTAnnotatorRuntimeError, \ @@ -51,25 +50,25 @@ DetectorFrame, \ ImageResolution, \ IntrinsicCalibration, \ - KeyValueMetaAbstract, \ - KeyValueMetaAny, \ - key_value_meta_to_simple, \ - KeyValueSimpleAny + KeyValueMetaAbstract import logging -from typing import Callable +from typing import Callable, Final from pydantic import BaseModel, Field logger = logging.getLogger(__name__) +_ROLE_LABEL: Final[str] = "detector" + + class DetectorConfiguration(BaseModel): """ Top-level schema for Detector initialization data """ calibrator_configuration: IntrinsicCalibrator.Configuration = Field() camera_configuration: Camera.Configuration = Field() - marker_configuration: Annotator.Configuration = Field() + annotator_configuration: Annotator.Configuration = Field() class Detector(MCTComponent): @@ -78,7 +77,7 @@ class Detector(MCTComponent): _calibrator: IntrinsicCalibrator _camera: Camera - _marker: Annotator + _annotator: Annotator _frame_count: int @@ -86,7 +85,7 @@ def __init__( self, detector_configuration: DetectorConfiguration, camera_type: type[Camera], - marker_type: type[Annotator] + annotator_type: type[Annotator] ): super().__init__( status_source_label="detector", @@ -99,8 +98,8 @@ def __init__( self._camera = camera_type( configuration=detector_configuration.camera_configuration, status_message_source=self.get_status_message_source()) - self._marker = marker_type( - configuration=detector_configuration.marker_configuration, + self._annotator = annotator_type( + configuration=detector_configuration.annotator_configuration, status_message_source=self.get_status_message_source()) self._frame_count = 0 @@ -115,11 +114,8 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro result_identifier: str intrinsic_calibration: IntrinsicCalibration try: - marker_parameters_kvm: list[KeyValueMetaAny] = self._marker.get_parameters() - marker_parameters_kvs: list[KeyValueSimpleAny] = key_value_meta_to_simple(marker_parameters_kvm) result_identifier, intrinsic_calibration = self._calibrator.calculate( - image_resolution=request.image_resolution, - marker_parameters=marker_parameters_kvs) + image_resolution=request.image_resolution) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationCalculateResponse( @@ -289,14 
+285,13 @@ def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorRespon detector_frame: DetectorFrame try: detector_frame = DetectorFrame( - detected_marker_snapshots=list(), - rejected_marker_snapshots=list(), - timestamp_utc_iso8601=self._marker.get_changed_timestamp().isoformat(), + annotations=list(), + timestamp_utc_iso8601=self._annotator.get_changed_timestamp().isoformat(), image_resolution=self._camera.get_resolution()) if request.include_detected: - detector_frame.detected_marker_snapshots = self._marker.get_markers_detected() + detector_frame.annotations += self._annotator.get_markers_detected() if request.include_rejected: - detector_frame.rejected_marker_snapshots = self._marker.get_markers_rejected() + detector_frame.annotations += self._annotator.get_markers_rejected() except (MCTCameraRuntimeError, MCTAnnotatorRuntimeError) as e: return ErrorResponse(message=e.message) return DetectorFrameGetResponse(frame=detector_frame) @@ -315,9 +310,13 @@ def detector_stop(self, **_kwargs) -> EmptyResponse | ErrorResponse: return ErrorResponse(message=e.message) return EmptyResponse() + @staticmethod + def get_role_label(): + return _ROLE_LABEL + def marker_parameters_get(self, **_kwargs) -> AnnotatorParametersGetResponse | ErrorResponse: try: - parameters = self._marker.get_parameters() + parameters = self._annotator.get_parameters() except MCTAnnotatorRuntimeError as e: return ErrorResponse(message=e.message) return AnnotatorParametersGetResponse(parameters=parameters) @@ -328,7 +327,7 @@ def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: key="request", arg_type=AnnotatorParametersSetRequest) try: - self._marker.set_parameters(parameters=request.parameters) + self._annotator.set_parameters(parameters=request.parameters) except MCTAnnotatorRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() @@ -367,10 +366,10 @@ async def update(self): self._camera.update() except MCTCameraRuntimeError as e: self.add_status_message(severity="error", message=e.message) - if self._marker.get_status() == Annotator.Status.RUNNING and \ - self._camera.get_changed_timestamp() > self._marker.get_changed_timestamp(): + if self._annotator.get_status() == Annotator.Status.RUNNING and \ + self._camera.get_changed_timestamp() > self._annotator.get_changed_timestamp(): try: - self._marker.update(self._camera.get_image()) + self._annotator.update(self._camera.get_image()) except MCTAnnotatorRuntimeError as e: self.add_status_message(severity="error", message=e.message) self._frame_count += 1 diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index ed21d7e..b404836 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -57,16 +57,16 @@ def create_app() -> FastAPI: raise RuntimeError(f"Unsupported camera driver {detector_configuration.camera_configuration.driver}.") marker_type: type[Annotator] - if detector_configuration.marker_configuration.method == "aruco_opencv": + if detector_configuration.annotator_configuration.method == "aruco_opencv": from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator marker_type = ArucoOpenCVAnnotator else: - raise RuntimeError(f"Unsupported marker method {detector_configuration.marker_configuration.method}.") + raise RuntimeError(f"Unsupported marker method {detector_configuration.annotator_configuration.method}.") detector = Detector( detector_configuration=detector_configuration, camera_type=camera_type, - marker_type=marker_type) + 
annotator_type=marker_type) detector_app = FastAPI() # CORS Middleware diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index 699ef96..f61ed50 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -19,7 +19,7 @@ from src.common.structures import \ DetectorFrame, \ ImageResolution, \ - MarkerSnapshot, \ + Annotation, \ Matrix4x4, \ PoseSolverFrame, \ Pose @@ -79,7 +79,7 @@ def __init__( _build_board_button: wx.Button _repeatability_testing_checkbox: ParameterCheckbox _reset_button: wx.Button - _live_markers_detected: list[MarkerSnapshot] + _live_markers_detected: list[Annotation] _tracked_target_poses: list[Pose] # This could maybe be added to the LiveDetectorPreview class @@ -366,7 +366,7 @@ def update_loop(self) -> None: self._renderer.render() if self._controller.is_running(): - detector_data: dict[str, list[MarkerSnapshot]] = {} + detector_data: dict[str, list[Annotation]] = {} should_refresh = False # Add this flag for preview in self.live_detector_previews: @@ -387,7 +387,7 @@ def update_loop(self) -> None: self.handle_response_series(response_series) should_refresh = True # Only refresh when new data is received - detector_data[detector_label] = preview.detector_frame.detected_marker_snapshots + detector_data[detector_label] = preview.detector_frame.annotations_identified if detector_data: self._run_board_builder(detector_data) @@ -406,12 +406,12 @@ def _begin_capture_snapshot(self, preview: LiveDetectorPreview): connection_label=preview.detector_label, request_series=request_series) - def _draw_all_corners(self, detected_marker_snapshots, scale, frame, color): + def _draw_all_corners(self, annotations, scale, frame, color): """ Takes in a dictionary of marker UUIDs to their corners and draws each set of corners on the frame with different colors. 
""" - corners = self._marker_snapshot_list_to_opencv_points(detected_marker_snapshots, scale) + corners = self._marker_snapshot_list_to_opencv_points(annotations, scale) cv2.polylines( img=frame, @@ -439,14 +439,26 @@ def _handle_capture_snapshot_response( @staticmethod def _marker_snapshot_list_to_opencv_points( - marker_snapshot_list: list[MarkerSnapshot], - scale: float + marker_snapshot_list: list[Annotation], + scale: float ) -> numpy.ndarray: - corners: list[list[list[(float, float)]]] = [[[ - (corner_point.x_px * scale, corner_point.y_px * scale) - for corner_point in marker.corner_image_points - ]] for marker in marker_snapshot_list] - return_value = numpy.array(corners, dtype=numpy.int32) + if len(marker_snapshot_list) <= 0: + return numpy.asarray([], dtype=numpy.int32) + return_value: list[list[list[(float, float)]]] = list() + current_base_label: str = marker_snapshot_list[0].base_label() + current_shape_points: list[list[(float, float)]] = [[ + marker_snapshot_list[0].x_px * scale, + marker_snapshot_list[0].y_px * scale]] + for marker_snapshot in marker_snapshot_list: + annotation_base_label = marker_snapshot.base_label() + if annotation_base_label != current_base_label: + return_value.append(current_shape_points) + current_base_label = annotation_base_label + current_shape_points.append([ + marker_snapshot.x_px * scale, + marker_snapshot.y_px * scale]) + return_value.append(current_shape_points) + return_value = numpy.asarray(return_value, dtype=numpy.int32) return return_value def _on_build_board_button_click(self, event: wx.CommandEvent) -> None: @@ -568,9 +580,17 @@ def _process_frame(self, preview: LiveDetectorPreview): if scale is not None: if self._annotate_detected_checkbox.checkbox.GetValue(): - self._draw_all_corners(preview.detector_frame.detected_marker_snapshots, scale, display_image, [255, 191, 127]) + self._draw_all_corners( + annotations=preview.detector_frame.annotations_identified, + scale=scale, + frame=display_image, + color=[255, 191, 127]) if self._annotate_rejected_checkbox.checkbox.GetValue(): - self._draw_all_corners(preview.detector_frame.rejected_marker_snapshots, scale, display_image, [127, 191, 255]) + self._draw_all_corners( + annotations=preview.detector_frame.annotations, + scale=scale, + frame=display_image, + color=[127, 191, 255]) image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") image_buffer_io: BytesIO = BytesIO(image_buffer) diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index 312a1a0..a35517e 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -12,6 +12,7 @@ ErrorResponse, \ EmptyResponse, \ ImageUtils, \ + IntrinsicCalibrator, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ @@ -35,8 +36,7 @@ CalibrationResultGetResponse, \ CalibrationResultMetadataListRequest, \ CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest, \ - IntrinsicCalibrator + CalibrationResultMetadataUpdateRequest from io import BytesIO import logging from typing import Optional diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 7426859..6cb64ce 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -16,11 +16,11 @@ MCTResponseSeries, \ StatusMessageSource from src.common.structures import \ - CaptureFormat, \ DetectorFrame, \ + ImageFormat, \ ImageResolution, \ KeyValueSimpleAny, \ - MarkerSnapshot + Annotation from src.controller import \ 
MCTController from src.detector.api import \ @@ -60,7 +60,7 @@ "SUBPIX", "CONTOUR", "APRILTAG"] -_CAPTURE_FORMAT: CaptureFormat = ".jpg" +_CAPTURE_FORMAT: ImageFormat = ImageFormat.FORMAT_JPG _CAMERA_PARAMETER_SLOT_COUNT: Final[int] = 100 @@ -73,8 +73,8 @@ class DetectorPanel(BasePanel): _live_preview_request_id: uuid.UUID | None _live_preview_image_base64: str | None - _live_markers_detected: list[MarkerSnapshot] - _live_markers_rejected: list[MarkerSnapshot] + _live_markers_detected: list[Annotation] + _live_markers_rejected: list[Annotation] _live_resolution: ImageResolution | None _detector_selector: ParameterSelector @@ -401,14 +401,26 @@ def _handle_get_detection_parameters_response( @staticmethod def _marker_snapshot_list_to_opencv_points( - marker_snapshot_list: list[MarkerSnapshot], + marker_snapshot_list: list[Annotation], scale: float ) -> numpy.ndarray: - corners: list[list[list[(float, float)]]] = [[[ - (corner_point.x_px * scale, corner_point.y_px * scale) - for corner_point in marker.corner_image_points - ]] for marker in marker_snapshot_list] - return_value = numpy.array(corners, dtype=numpy.int32) + if len(marker_snapshot_list) <= 0: + return numpy.asarray([], dtype=numpy.int32) + return_value: list[list[list[(float, float)]]] = list() + current_base_label: str = marker_snapshot_list[0].base_label() + current_shape_points: list[list[(float, float)]] = [[ + marker_snapshot_list[0].x_px * scale, + marker_snapshot_list[0].y_px * scale]] + for marker_snapshot in marker_snapshot_list: + annotation_base_label = marker_snapshot.base_label() + if annotation_base_label != current_base_label: + return_value.append(current_shape_points) + current_base_label = annotation_base_label + current_shape_points.append([ + marker_snapshot.x_px * scale, + marker_snapshot.y_px * scale]) + return_value.append(current_shape_points) + return_value = numpy.asarray(return_value, dtype=numpy.int32) return return_value def on_calibration_capture_pressed(self, _event: wx.CommandEvent): @@ -478,8 +490,8 @@ def update_loop(self): detector_frame: DetectorFrame | None = self._controller.get_live_detector_frame( detector_label=detector_label) if detector_frame is not None: - self._live_markers_detected = detector_frame.detected_marker_snapshots - self._live_markers_rejected = detector_frame.rejected_marker_snapshots + self._live_markers_detected = detector_frame.annotations_identified + self._live_markers_rejected = detector_frame.annotations_unidentified self._live_resolution = detector_frame.image_resolution if self._preview_image_checkbox.checkbox.GetValue() and self._live_preview_request_id is None: if self._live_resolution is not None: diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index a8355cf..26a42fa 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -202,10 +202,10 @@ def update_loop(self) -> None: for detector_label in detector_labels: retrieved_detector_frame: DetectorFrame = self._controller.get_live_detector_frame( detector_label=detector_label) - retrieved_detector_frame_timestamp: datetime.datetime = retrieved_detector_frame.timestamp_utc() + retrieved_detector_frame_timestamp: datetime.datetime = retrieved_detector_frame.timestamp_utc if detector_label in self._latest_detector_frames: latest_detector_frame: DetectorFrame = self._latest_detector_frames[detector_label] - latest_detector_frame_timestamp: datetime.datetime = latest_detector_frame.timestamp_utc() + latest_detector_frame_timestamp: 
datetime.datetime = latest_detector_frame.timestamp_utc if retrieved_detector_frame_timestamp > latest_detector_frame_timestamp: self._latest_detector_frames[detector_label] = retrieved_detector_frame else: diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index 440d623..03cd314 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector import IntrinsicCalibrator +from src.common import IntrinsicCalibrator from typing import Final import wx diff --git a/src/gui/panels/specialized/calibration_result_table.py b/src/gui/panels/specialized/calibration_result_table.py index 778f1ab..d7e69e9 100644 --- a/src/gui/panels/specialized/calibration_result_table.py +++ b/src/gui/panels/specialized/calibration_result_table.py @@ -1,5 +1,5 @@ from .row_selection_table import RowSelectionTable -from src.detector import IntrinsicCalibrator +from src.common import IntrinsicCalibrator from typing import Final import wx diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py index ee16a19..d56b546 100644 --- a/src/implementations/annotator_aruco_opencv.py +++ b/src/implementations/annotator_aruco_opencv.py @@ -1,83 +1,29 @@ +from .common_aruco_opencv import ArucoOpenCVCommon from src.common import \ Annotator, \ MCTAnnotatorRuntimeError, \ StatusMessageSource from src.common.structures import \ - CornerRefinementMethod, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT, \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT, \ + Annotation, \ KeyValueMetaAny, \ - KeyValueMetaBool, \ - KeyValueMetaEnum, \ - KeyValueMetaFloat, \ - KeyValueMetaInt, \ - KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ - KeyValueSimpleBool, \ - KeyValueSimpleFloat, \ - KeyValueSimpleInt, \ - KeyValueSimpleString, \ - MarkerCornerImagePoint, \ - MarkerSnapshot + RELATION_CHARACTER import cv2.aruco import datetime import logging import numpy -from typing import Any, Final, get_args +from typing import Optional logger = logging.getLogger(__name__) -# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html -# for documentation on individual parameters - -# Adaptive Thresholding -KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" -KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" -KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" -KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" -# Contour Filtering -KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio -KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" -KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square tolerance ratio -KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio -KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio -KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels -# Bits Extraction -KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (px) -KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev -KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate -KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: 
Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio -# Marker Identification -KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate -KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rat -KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" -KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" -KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" -KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" -KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" -# April Tag Only -KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" -KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" -KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" -KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" -KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" -KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" -KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" -KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" -# ArUco 3 -KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" -KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = "minMarkerLengthRatioOriginalImg" -KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" - - class ArucoOpenCVAnnotator(Annotator): - _aruco_dictionary: Any | None # created by OpenCV, type cv2.aruco.Dictionary - _aruco_parameters: Any # created by OpenCV, type cv2.aruco.DetectorParameters - _snapshots_identified: list[MarkerSnapshot] # Markers that are determined to be valid, and are identified - _snapshots_unidentified: list[MarkerSnapshot] # Looked at first like markers but got filtered out + _aruco_dictionary: Optional # created by OpenCV, type cv2.aruco.Dictionary + _aruco_parameters: ... # created by OpenCV, type cv2.aruco.DetectorParameters + _snapshots_identified: list[Annotation] # Markers that are determined to be valid, and are identified + _snapshots_unidentified: list[Annotation] # Looked at first like markers but got filtered out _update_timestamp_utc: datetime.datetime def __init__( @@ -96,442 +42,28 @@ def __init__( self._update_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self.set_status(Annotator.Status.RUNNING) # Always running - @staticmethod - def assign_aruco_detection_parameters_to_key_value_list( - detection_parameters: ... 
# cv2.aruco.DetectionParameters - ) -> list[KeyValueMetaAny]: - - return_value: list[KeyValueMetaAny] = list() - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, - value=detection_parameters.adaptiveThreshWinSizeMin, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, - value=detection_parameters.adaptiveThreshWinSizeMax, - range_minimum=1, - range_maximum=99)) - - return_value.append(KeyValueMetaInt( - key=KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, - value=detection_parameters.adaptiveThreshWinSizeStep, - range_minimum=1, - range_maximum=99, - range_step=2)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ADAPTIVE_THRESH_CONSTANT, - value=detection_parameters.adaptiveThreshConstant, - range_minimum=-255.0, - range_maximum=255.0, - range_step=1.0)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_PERIMETER_RATE, - value=detection_parameters.minMarkerPerimeterRate, - range_minimum=0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_MARKER_PERIMETER_RATE, - value=detection_parameters.maxMarkerPerimeterRate, - range_minimum=0.0, - range_maximum=8.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_POLYGONAL_APPROX_ACCURACY_RATE, - value=detection_parameters.polygonalApproxAccuracyRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_CORNER_DISTANCE_RATE, - value=detection_parameters.minCornerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_DISTANCE_RATE, - value=detection_parameters.minMarkerDistanceRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_MIN_DISTANCE_TO_BORDER, - value=detection_parameters.minDistanceToBorder, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_MARKER_BORDER_BITS, - value=detection_parameters.markerBorderBits, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_OTSU_STDDEV, - value=detection_parameters.minOtsuStdDev, - range_minimum=0.0, - range_maximum=256.0, - range_step=1.0)) - - return_value.append(KeyValueMetaInt( - key=KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, - value=detection_parameters.perspectiveRemovePixelPerCell, - range_minimum=1, - range_maximum=20)) - - return_value.append(KeyValueMetaFloat( - key=KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, - value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, - range_minimum=0.0, - range_maximum=0.5, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, - value=detection_parameters.maxErroneousBitsInBorderRate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_ERROR_CORRECTION_RATE, - value=detection_parameters.errorCorrectionRate, - range_minimum=-0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaBool( - key=KEY_DETECT_INVERTED_MARKER, - value=detection_parameters.detectInvertedMarker)) - - if detection_parameters.cornerRefinementMethod not in CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: - message: str = f"Corner refinement method appears to be set to an invalid value: " \ - f"{detection_parameters.corner_refinement_method}." 
- logger.error(message) - raise MCTAnnotatorRuntimeError(message=message) - corner_refinement_method_text: CornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[detection_parameters.cornerRefinementMethod] - return_value.append(KeyValueMetaEnum( - key=KEY_CORNER_REFINEMENT_METHOD, - value=corner_refinement_method_text, - allowable_values=get_args(CornerRefinementMethod))) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_WIN_SIZE, - value=detection_parameters.cornerRefinementWinSize, - range_minimum=1, - range_maximum=9)) - - return_value.append(KeyValueMetaInt( - key=KEY_CORNER_REFINEMENT_MAX_ITERATIONS, - value=detection_parameters.cornerRefinementMaxIterations, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaFloat( - key=KEY_CORNER_REFINEMENT_MIN_ACCURACY, - value=detection_parameters.cornerRefinementMinAccuracy, - range_minimum=0.0, - range_maximum=5.0, - range_step=0.1)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_CRITICAL_RAD, - value=detection_parameters.aprilTagCriticalRad, - range_minimum=-0.0, - range_maximum=numpy.pi, - range_step=numpy.pi / 20.0)) - - return_value.append(KeyValueMetaBool( - key=KEY_APRIL_TAG_DEGLITCH, - value=detection_parameters.aprilTagDeglitch)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_MAX_LINE_FIT_MSE, - value=detection_parameters.aprilTagMaxLineFitMse, - range_minimum=0.0, - range_maximum=512.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MAX_N_MAXIMA, - value=detection_parameters.aprilTagMaxNmaxima, - range_minimum=1, - range_maximum=100)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, - value=detection_parameters.aprilTagMinClusterPixels, - range_minimum=0, - range_maximum=512)) - - return_value.append(KeyValueMetaInt( - key=KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, - value=detection_parameters.aprilTagMinWhiteBlackDiff, - range_minimum=0, - range_maximum=256)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_DECIMATE, - value=detection_parameters.aprilTagQuadDecimate, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaFloat( - key=KEY_APRIL_TAG_QUAD_SIGMA, - value=detection_parameters.aprilTagQuadSigma, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - # Note: a relatively recent addition to OpenCV, may not be available in some python versions - if hasattr(detection_parameters, "useAruco3Detection"): - return_value.append(KeyValueMetaBool( - key=KEY_USE_ARUCO_3_DETECTION, - value=detection_parameters.useAruco3Detection)) - - return_value.append(KeyValueMetaFloat( - key=KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, - value=detection_parameters.minMarkerLengthRatioOriginalImg, - range_minimum=0.0, - range_maximum=1.0, - range_step=0.01)) - - return_value.append(KeyValueMetaInt( - key=KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, - value=detection_parameters.minSideLengthCanonicalImg, - range_minimum=1, - range_maximum=512)) - - return return_value - - @staticmethod - def assign_key_value_list_to_aruco_detection_parameters( - detection_parameters: ..., # cv2.aruco.DetectionParameters - key_value_list: list[KeyValueSimpleAny] - ) -> list[str]: - """ - Returns list of mismatched keys - """ - mismatched_keys: list[str] = list() - key_value: KeyValueSimpleAbstract - for key_value in key_value_list: - if key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: - if not isinstance(key_value, KeyValueSimpleInt): - 
mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMin = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeMax = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshWinSizeStep = key_value.value - elif key_value.key == KEY_ADAPTIVE_THRESH_CONSTANT: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.adaptiveThreshConstant = key_value.value - elif key_value.key == KEY_MIN_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_MAX_MARKER_PERIMETER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxMarkerPerimeterRate = key_value.value - elif key_value.key == KEY_POLYGONAL_APPROX_ACCURACY_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.polygonalApproxAccuracyRate = key_value.value - elif key_value.key == KEY_MIN_CORNER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minCornerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_MARKER_DISTANCE_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minMarkerDistanceRate = key_value.value - elif key_value.key == KEY_MIN_DISTANCE_TO_BORDER: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minDistanceToBorder = key_value.value - elif key_value.key == KEY_MARKER_BORDER_BITS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.markerBorderBits = key_value.value - elif key_value.key == KEY_MIN_OTSU_STDDEV: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minOtsuStdDev = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemovePixelPerCell = key_value.value - elif key_value.key == KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value - elif key_value.key == KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.maxErroneousBitsInBorderRate = key_value.value - elif key_value.key == KEY_ERROR_CORRECTION_RATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.errorCorrectionRate = key_value.value - elif key_value.key == KEY_DETECT_INVERTED_MARKER: - if not 
isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.detectInvertedMarker = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_METHOD: - if not isinstance(key_value, KeyValueSimpleString): - mismatched_keys.append(key_value.key) - continue - corner_refinement_method: str = key_value.value - if corner_refinement_method in CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: - # noinspection PyTypeChecker - detection_parameters.cornerRefinementMethod = \ - CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[corner_refinement_method] - else: - raise MCTAnnotatorRuntimeError( - message=f"Failed to find corner refinement method {corner_refinement_method}.") - elif key_value.key == KEY_CORNER_REFINEMENT_WIN_SIZE: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementWinSize = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MAX_ITERATIONS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMaxIterations = key_value.value - elif key_value.key == KEY_CORNER_REFINEMENT_MIN_ACCURACY: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.cornerRefinementMinAccuracy = key_value.value - elif key_value.key == KEY_APRIL_TAG_CRITICAL_RAD: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagCriticalRad = key_value.value - elif key_value.key == KEY_APRIL_TAG_DEGLITCH: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagDeglitch = int(key_value.value) - elif key_value.key == KEY_APRIL_TAG_MAX_LINE_FIT_MSE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxLineFitMse = key_value.value - elif key_value.key == KEY_APRIL_TAG_MAX_N_MAXIMA: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMaxNmaxima = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinClusterPixels = key_value.value - elif key_value.key == KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_DECIMATE: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagQuadDecimate = key_value.value - elif key_value.key == KEY_APRIL_TAG_QUAD_SIGMA: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - continue - detection_parameters.aprilTagQuadSigma = key_value.value - elif key_value.key == KEY_USE_ARUCO_3_DETECTION: - if not isinstance(key_value, KeyValueSimpleBool): - mismatched_keys.append(key_value.key) - continue - detection_parameters.useAruco3Detection = key_value.value - elif key_value.key == KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: - if not isinstance(key_value, KeyValueSimpleFloat): - mismatched_keys.append(key_value.key) - 
continue - detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value - elif key_value.key == KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: - if not isinstance(key_value, KeyValueSimpleInt): - mismatched_keys.append(key_value.key) - continue - detection_parameters.minSideLengthCanonicalImg = key_value.value - else: - mismatched_keys.append(key_value.key) - return mismatched_keys - def get_changed_timestamp(self) -> datetime.datetime: return self._update_timestamp_utc - def get_markers_detected(self) -> list[MarkerSnapshot]: + def get_markers_detected(self) -> list[Annotation]: return self._snapshots_identified - def get_markers_rejected(self) -> list[MarkerSnapshot]: + def get_markers_rejected(self) -> list[Annotation]: return self._snapshots_unidentified def get_parameters(self) -> list[KeyValueMetaAny]: - return self.assign_aruco_detection_parameters_to_key_value_list(self._aruco_parameters) + return ArucoOpenCVCommon.assign_aruco_detection_parameters_to_key_value_list(self._aruco_parameters) @staticmethod def get_type_identifier() -> str: return "aruco_opencv" - @staticmethod - def _corner_image_point_list_from_embedded_list( - corner_image_points_px: list[list[float]] - ) -> list[MarkerCornerImagePoint]: - corner_image_point_list: list[MarkerCornerImagePoint] = list() - assert len(corner_image_points_px) == 4 - for corner_image_point_px in corner_image_points_px: - corner_image_point_list.append(MarkerCornerImagePoint( - x_px=corner_image_point_px[0], - y_px=corner_image_point_px[1])) - return corner_image_point_list - # noinspection DuplicatedCode def set_parameters( self, parameters: list[KeyValueSimpleAny] ) -> None: - mismatched_keys: list[str] = self.assign_key_value_list_to_aruco_detection_parameters( + mismatched_keys: list[str] = ArucoOpenCVCommon.assign_key_value_list_to_aruco_detection_parameters( detection_parameters=self._aruco_parameters, key_value_list=parameters) if len(mismatched_keys) > 0: @@ -562,25 +94,21 @@ def update( detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_count, 4, 2)) detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) for detected_index, detected_id in enumerate(detected_dictionary_indices): - detected_label: str = str(detected_id) - corner_image_points_px = detected_corner_points_px[detected_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._snapshots_identified.append(MarkerSnapshot( - label=detected_label, - corner_image_points=corner_image_points)) + for corner_index in range(4): + detected_label: str = f"{detected_id}{RELATION_CHARACTER}{corner_index}" + self._snapshots_identified.append(Annotation( + label=detected_label, + x_px=detected_corner_points_px[detected_index][corner_index][0], + y_px=detected_corner_points_px[detected_index][corner_index][1])) self._snapshots_unidentified = list() if rejected_corner_points_raw: rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) for rejected_index in range(rejected_corner_points_px.shape[0]): - corner_image_points_px = rejected_corner_points_px[rejected_index] - corner_image_points: list[MarkerCornerImagePoint] = \ - self._corner_image_point_list_from_embedded_list( - corner_image_points_px=corner_image_points_px.tolist()) - self._snapshots_unidentified.append(MarkerSnapshot( - label=f"unknown", - corner_image_points=corner_image_points)) + for corner_index in 
range(4): + self._snapshots_unidentified.append(Annotation( + label=Annotation.UNIDENTIFIED_LABEL, + x_px=rejected_corner_points_px[rejected_index][corner_index][0], + y_px=rejected_corner_points_px[rejected_index][corner_index][1])) self._update_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/camera_opencv_capture_device.py b/src/implementations/camera_opencv_capture_device.py index c694933..888c879 100644 --- a/src/implementations/camera_opencv_capture_device.py +++ b/src/implementations/camera_opencv_capture_device.py @@ -1,7 +1,5 @@ from src.common import \ Camera, \ - CameraConfiguration, \ - CameraStatus, \ ImageUtils, \ MCTCameraRuntimeError, \ StatusMessageSource @@ -79,7 +77,7 @@ class OpenCVCaptureDeviceCamera(Camera): def __init__( self, - configuration: CameraConfiguration, + configuration: Camera.Configuration, status_message_source: StatusMessageSource ): super().__init__( @@ -89,7 +87,7 @@ def __init__( self._image_timestamp_utc = datetime.datetime.min self._capture = None self._capture_device_id = configuration.capture_device - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def __del__(self): if self._capture is not None: @@ -275,13 +273,13 @@ def start(self) -> None: self._capture.set(cv2.CAP_PROP_SHARPNESS, float(_CAMERA_SHARPNESS_DEFAULT)) self._capture.set(cv2.CAP_PROP_GAMMA, float(_CAMERA_GAMMA_DEFAULT)) - self.set_status(CameraStatus.RUNNING) + self.set_status(Camera.Status.RUNNING) def stop(self) -> None: if self._capture is not None: self._capture.release() self._capture = None - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def update(self) -> None: grabbed_frame: bool @@ -289,7 +287,7 @@ def update(self) -> None: if not grabbed_frame: message: str = "Failed to grab frame." self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) + self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) retrieved_frame: bool @@ -297,7 +295,7 @@ def update(self) -> None: if not retrieved_frame: message: str = "Failed to retrieve frame." 
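# Referring back to ArucoOpenCVAnnotator.update() above: it now emits one
# Annotation per marker corner instead of one MarkerSnapshot per marker. A small
# illustration of the resulting label contract, assuming RELATION_CHARACTER is
# ":" (the real value is defined in src.common.structures) and that base_label()
# strips the corner suffix; rejected candidates are emitted with
# Annotation.UNIDENTIFIED_LABEL instead of an id-based label.
RELATION_CHARACTER = ":"  # illustrative assumption only
detected_id = 7
labels = [f"{detected_id}{RELATION_CHARACTER}{corner_index}" for corner_index in range(4)]
assert labels == ["7:0", "7:1", "7:2", "7:3"]  # base_label() of each would be "7"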
self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) + self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/camera_picamera2.py b/src/implementations/camera_picamera2.py index 92a8586..b1846c7 100644 --- a/src/implementations/camera_picamera2.py +++ b/src/implementations/camera_picamera2.py @@ -1,7 +1,5 @@ from src.common import \ Camera, \ - CameraConfiguration, \ - CameraStatus, \ MCTCameraRuntimeError, \ StatusMessageSource from src.common.structures import \ @@ -85,7 +83,7 @@ class Picamera2Camera(Camera): def __init__( self, - configuration: CameraConfiguration, + configuration: Camera.Configuration, status_message_source: StatusMessageSource ): super().__init__( @@ -95,7 +93,7 @@ def __init__( self._image_timestamp_utc = datetime.datetime.min self._camera = Picamera2() self._camera_configuration = self._camera.create_video_configuration() - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) def get_changed_timestamp(self) -> datetime.datetime: return self._image_timestamp_utc @@ -106,7 +104,7 @@ def get_image(self) -> numpy.ndarray: return self._image def get_parameters(self, **_kwargs) -> list[KeyValueMetaAbstract]: - if self.get_status() != CameraStatus.RUNNING: + if self.get_status() != Camera.Status.RUNNING: raise MCTCameraRuntimeError(message="The capture is not active, and properties cannot be retrieved.") current_controls: dict = { @@ -226,7 +224,7 @@ def set_parameters(self, parameters: list[KeyValueSimpleAny]) -> None: raise MCTCameraRuntimeError( message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") - if self.get_status() == CameraStatus.RUNNING: + if self.get_status() == Camera.Status.RUNNING: self._camera.stop() self._camera.configure(self._camera_configuration) self._camera.start() @@ -260,12 +258,12 @@ def start(self) -> None: self._camera.configure(self._camera_configuration) self._camera.start() self._image = self._camera.capture_array() - self.set_status(CameraStatus.RUNNING) + self.set_status(Camera.Status.RUNNING) def stop(self) -> None: if self._image is not None: self._image = None - self.set_status(CameraStatus.STOPPED) + self.set_status(Camera.Status.STOPPED) self._camera.stop() def update(self) -> None: @@ -274,7 +272,7 @@ def update(self) -> None: if self._image is None: message: str = "Failed to grab frame." 
self.add_status_message(severity="error", message=message) - self.set_status(CameraStatus.FAILURE) + self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) self._image_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py new file mode 100644 index 0000000..ad535af --- /dev/null +++ b/src/implementations/common_aruco_opencv.py @@ -0,0 +1,619 @@ +from src.common.structures import \ + KeyValueMetaAny, \ + KeyValueMetaBool, \ + KeyValueMetaEnum, \ + KeyValueMetaFloat, \ + KeyValueMetaInt, \ + KeyValueSimpleAbstract, \ + KeyValueSimpleAny, \ + KeyValueSimpleBool, \ + KeyValueSimpleFloat, \ + KeyValueSimpleInt, \ + KeyValueSimpleString, \ + MCTSerializationError +import cv2.aruco +import logging +import numpy +from typing import Final, get_args, Literal + + +logger = logging.getLogger(__name__) + + +# Look at https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html +# for documentation on individual parameters + +# Adaptive Thresholding +_KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = "adaptiveThreshWinSizeMin" +_KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = "adaptiveThreshWinSizeMax" +_KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = "adaptiveThreshWinSizeStep" +_KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = "adaptiveThreshConstant" +# Contour Filtering +_KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = "minMarkerPerimeterRate" # Marker size ratio +_KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = "maxMarkerPerimeterRate" +_KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = "polygonalApproxAccuracyRate" # Square tolerance ratio +_KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = "minCornerDistanceRate" # Corner separation ratio +_KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = "minMarkerDistanceRate" # Marker separation ratio +_KEY_MIN_DISTANCE_TO_BORDER: Final[str] = "minDistanceToBorder" # Border distance in pixels +# Bits Extraction +_KEY_MARKER_BORDER_BITS: Final[str] = "markerBorderBits" # Border width (px) +_KEY_MIN_OTSU_STDDEV: Final[str] = "minOtsuStdDev" # Minimum brightness stdev +_KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = "perspectiveRemovePixelPerCell" # Bit Sampling Rate +_KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = "perspectiveRemoveIgnoredMarginPerCell" # Bit Margin Ratio +# Marker Identification +_KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = "maxErroneousBitsInBorderRate" # Border Error Rate +_KEY_ERROR_CORRECTION_RATE: Final[str] = "errorCorrectionRate" # Error Correction Rat +_KEY_DETECT_INVERTED_MARKER: Final[str] = "detectInvertedMarker" +_KEY_CORNER_REFINEMENT_METHOD: Final[str] = "cornerRefinementMethod" +_KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = "cornerRefinementWinSize" +_KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = "cornerRefinementMaxIterations" +_KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = "cornerRefinementMinAccuracy" +# April Tag Only +_KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = "aprilTagCriticalRad" +_KEY_APRIL_TAG_DEGLITCH: Final[str] = "aprilTagDeglitch" +_KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = "aprilTagMaxLineFitMse" +_KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = "aprilTagMaxNmaxima" +_KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = "aprilTagMinClusterPixels" +_KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = "aprilTagMinWhiteBlackDiff" +_KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = "aprilTagQuadDecimate" +_KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = "aprilTagQuadSigma" +# ArUco 3 
+_KEY_USE_ARUCO_3_DETECTION: Final[str] = "useAruco3Detection" +_KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = "minMarkerLengthRatioOriginalImg" +_KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = "minSideLengthCanonicalImg" + + +class ArucoOpenCVCommon: + """ + A "class" to group related static functions and constants, like in a namespace. + The class itself is not meant to be instantiated. + """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + + KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN + KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX + KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: Final[str] = _KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP + KEY_ADAPTIVE_THRESH_CONSTANT: Final[str] = _KEY_ADAPTIVE_THRESH_CONSTANT + KEY_MIN_MARKER_PERIMETER_RATE: Final[str] = _KEY_MIN_MARKER_PERIMETER_RATE + KEY_MAX_MARKER_PERIMETER_RATE: Final[str] = _KEY_MAX_MARKER_PERIMETER_RATE + KEY_POLYGONAL_APPROX_ACCURACY_RATE: Final[str] = _KEY_POLYGONAL_APPROX_ACCURACY_RATE + KEY_MIN_CORNER_DISTANCE_RATE: Final[str] = _KEY_MIN_CORNER_DISTANCE_RATE + KEY_MIN_MARKER_DISTANCE_RATE: Final[str] = _KEY_MIN_MARKER_DISTANCE_RATE + KEY_MIN_DISTANCE_TO_BORDER: Final[str] = _KEY_MIN_DISTANCE_TO_BORDER + KEY_MARKER_BORDER_BITS: Final[str] = _KEY_MARKER_BORDER_BITS + KEY_MIN_OTSU_STDDEV: Final[str] = _KEY_MIN_OTSU_STDDEV + KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: Final[str] = _KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL + KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: Final[str] = _KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL + KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: Final[str] = _KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE + KEY_ERROR_CORRECTION_RATE: Final[str] = _KEY_ERROR_CORRECTION_RATE + KEY_DETECT_INVERTED_MARKER: Final[str] = _KEY_DETECT_INVERTED_MARKER + KEY_CORNER_REFINEMENT_METHOD: Final[str] = _KEY_CORNER_REFINEMENT_METHOD + KEY_CORNER_REFINEMENT_WIN_SIZE: Final[str] = _KEY_CORNER_REFINEMENT_WIN_SIZE + KEY_CORNER_REFINEMENT_MAX_ITERATIONS: Final[str] = _KEY_CORNER_REFINEMENT_MAX_ITERATIONS + KEY_CORNER_REFINEMENT_MIN_ACCURACY: Final[str] = _KEY_CORNER_REFINEMENT_MIN_ACCURACY + KEY_APRIL_TAG_CRITICAL_RAD: Final[str] = _KEY_APRIL_TAG_CRITICAL_RAD + KEY_APRIL_TAG_DEGLITCH: Final[str] = _KEY_APRIL_TAG_DEGLITCH + KEY_APRIL_TAG_MAX_LINE_FIT_MSE: Final[str] = _KEY_APRIL_TAG_MAX_LINE_FIT_MSE + KEY_APRIL_TAG_MAX_N_MAXIMA: Final[str] = _KEY_APRIL_TAG_MAX_N_MAXIMA + KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: Final[str] = _KEY_APRIL_TAG_MIN_CLUSTER_PIXELS + KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: Final[str] = _KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF + KEY_APRIL_TAG_QUAD_DECIMATE: Final[str] = _KEY_APRIL_TAG_QUAD_DECIMATE + KEY_APRIL_TAG_QUAD_SIGMA: Final[str] = _KEY_APRIL_TAG_QUAD_SIGMA + KEY_USE_ARUCO_3_DETECTION: Final[str] = _KEY_USE_ARUCO_3_DETECTION + KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: Final[str] = _KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG + KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: Final[str] = _KEY_MIN_SIDE_LENGTH_CANONICAL_IMG + + CornerRefinementMethod = Literal["NONE", "SUBPIX", "CONTOUR", "APRILTAG"] + CORNER_REFINEMENT_METHOD_NONE: Final[str] = 'NONE' + CORNER_REFINEMENT_METHOD_SUBPIX: Final[str] = 'SUBPIX' + CORNER_REFINEMENT_METHOD_CONTOUR: Final[str] = 'CONTOUR' + CORNER_REFINEMENT_METHOD_APRILTAG: Final[str] = 'APRILTAG' + + CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: dict[CornerRefinementMethod, int] = { + "NONE": cv2.aruco.CORNER_REFINE_NONE, + "SUBPIX": cv2.aruco.CORNER_REFINE_SUBPIX, + "CONTOUR": 
cv2.aruco.CORNER_REFINE_CONTOUR, + "APRILTAG": cv2.aruco.CORNER_REFINE_APRILTAG} + + CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: dict[int, CornerRefinementMethod] = { + cv2.aruco.CORNER_REFINE_NONE: "NONE", + cv2.aruco.CORNER_REFINE_SUBPIX: "SUBPIX", + cv2.aruco.CORNER_REFINE_CONTOUR: "CONTOUR", + cv2.aruco.CORNER_REFINE_APRILTAG: "APRILTAG"} + + class CharucoBoard: + dictionary_name: str + square_count_x: int + square_count_y: int + square_size_px: int + marker_size_px: int + px_per_mm: float + + def __init__(self): + self.dictionary_name = "DICT_4X4_100" + self.square_count_x = 8 + self.square_count_y = 10 + self.square_size_px = 800 + self.marker_size_px = 400 + self.px_per_mm = 40 + + def aruco_dictionary(self) -> ...: # type cv2.aruco.Dictionary + if self.dictionary_name != "DICT_4X4_100": + raise NotImplementedError("Only DICT_4X4_100 is currently implemented") + aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) + return aruco_dictionary + + def size_px(self) -> tuple[float, float]: + board_size_x_px = self.square_count_x * self.square_size_px + board_size_y_px = self.square_count_y * self.square_size_px + return board_size_x_px, board_size_y_px + + def size_mm(self) -> tuple[float, float]: + board_size_x_mm = self.square_count_x * self.square_size_px / self.px_per_mm + board_size_y_mm = self.square_count_y * self.square_size_px / self.px_per_mm + return board_size_x_mm, board_size_y_mm + + def create_board(self) -> ...: # type cv2.aruco.CharucoBoard + charuco_board = cv2.aruco.CharucoBoard( + size=(self.square_count_x, self.square_count_y), + squareLength=self.square_size_px, + markerLength=self.marker_size_px, + dictionary=self.aruco_dictionary()) + return charuco_board + + def get_marker_center_points(self) -> list[list[float]]: + """ + Note that the coordinates assume (based on portrait orientation): + origin: at bottom-left of board + x-axis: goes right + y-axis: goes up the page + z-axis: comes out of the image and toward the viewer + """ + points = [] + for y in range(self.square_count_y): + for x in range(self.square_count_x): + if (x + y) % 2 == 1: # Only add the points for the white squares + point_x = (x + 0.5) * self.square_size_px / self.px_per_mm + point_y = (self.square_count_y - y - 0.5) * self.square_size_px / self.px_per_mm + points.append([point_x, point_y, 0.0]) + return points + + def get_marker_corner_points(self) -> list[list[float]]: + """ + Note that the coordinates assume the same axes as get_marker_center_points, + but the origin is in the center of the board, not the bottom-left corner. 
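        For example, with the default values above (8x10 squares, 800 px squares,
        400 px markers, 40 px/mm, i.e. 20 mm squares and 10 mm markers), the first
        white square sits at (x_sq=1, y_sq=0) and its marker corners come out as
        (-55, 95), (-45, 95), (-45, 85), (-55, 85) mm in the order top-left,
        top-right, bottom-right, bottom-left: a 10 mm square centred at
        (-50, 90) mm, i.e. 30 mm right of and 10 mm below the top-left corner of
        the 160 mm x 200 mm board.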
+ """ + points = [] + marker_size_mm: float = self.marker_size_px / self.px_per_mm + square_size_mm: float = self.square_size_px / self.px_per_mm + for y_sq in range(self.square_count_y): + for x_sq in range(self.square_count_x): + if (x_sq + y_sq) % 2 == 1: # Only add the points for the white squares + x_sq_centered: float = x_sq - (self.square_count_x / 2.0) + y_sq_centered: float = y_sq - (self.square_count_y / 2.0) + for corner_index in range(0, 4): + x_mm: float = (x_sq_centered + 0.5) * square_size_mm + if corner_index == 0 or corner_index == 3: + x_mm -= (marker_size_mm / 2.0) + else: + x_mm += (marker_size_mm / 2.0) + y_mm: float = (-(y_sq_centered + 0.5)) * square_size_mm + if corner_index == 0 or corner_index == 1: + y_mm += (marker_size_mm / 2.0) + else: + y_mm -= (marker_size_mm / 2.0) + z_mm: float = 0.0 + points.append([x_mm, y_mm, z_mm]) + return points + + def get_marker_ids(self) -> list[int]: + num_markers = self.square_count_x * self.square_count_y // 2 + return list(range(num_markers)) + + @staticmethod + def assign_aruco_detection_parameters_to_key_value_list( + detection_parameters: ... # cv2.aruco.DetectionParameters + ) -> list[KeyValueMetaAny]: + + return_value: list[KeyValueMetaAny] = list() + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN, + value=detection_parameters.adaptiveThreshWinSizeMin, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX, + value=detection_parameters.adaptiveThreshWinSizeMax, + range_minimum=1, + range_maximum=99)) + + return_value.append(KeyValueMetaInt( + key=_KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP, + value=detection_parameters.adaptiveThreshWinSizeStep, + range_minimum=1, + range_maximum=99, + range_step=2)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_ADAPTIVE_THRESH_CONSTANT, + value=detection_parameters.adaptiveThreshConstant, + range_minimum=-255.0, + range_maximum=255.0, + range_step=1.0)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_PERIMETER_RATE, + value=detection_parameters.minMarkerPerimeterRate, + range_minimum=0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MAX_MARKER_PERIMETER_RATE, + value=detection_parameters.maxMarkerPerimeterRate, + range_minimum=0.0, + range_maximum=8.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_POLYGONAL_APPROX_ACCURACY_RATE, + value=detection_parameters.polygonalApproxAccuracyRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_CORNER_DISTANCE_RATE, + value=detection_parameters.minCornerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_DISTANCE_RATE, + value=detection_parameters.minMarkerDistanceRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MIN_DISTANCE_TO_BORDER, + value=detection_parameters.minDistanceToBorder, + range_minimum=0, + range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MARKER_BORDER_BITS, + value=detection_parameters.markerBorderBits, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_OTSU_STDDEV, + value=detection_parameters.minOtsuStdDev, + range_minimum=0.0, + range_maximum=256.0, + range_step=1.0)) + + return_value.append(KeyValueMetaInt( + 
key=_KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL, + value=detection_parameters.perspectiveRemovePixelPerCell, + range_minimum=1, + range_maximum=20)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL, + value=detection_parameters.perspectiveRemoveIgnoredMarginPerCell, + range_minimum=0.0, + range_maximum=0.5, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE, + value=detection_parameters.maxErroneousBitsInBorderRate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_ERROR_CORRECTION_RATE, + value=detection_parameters.errorCorrectionRate, + range_minimum=-0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaBool( + key=_KEY_DETECT_INVERTED_MARKER, + value=detection_parameters.detectInvertedMarker)) + + if detection_parameters.cornerRefinementMethod not in \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT: + message: str = f"Corner refinement method appears to be set to an invalid value: " \ + f"{detection_parameters.corner_refinement_method}." + logger.error(message) + raise MCTSerializationError(message=message) + corner_refinement_method_text: ArucoOpenCVCommon.CornerRefinementMethod = \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_INT_TO_TEXT[ + detection_parameters.cornerRefinementMethod] + return_value.append(KeyValueMetaEnum( + key=_KEY_CORNER_REFINEMENT_METHOD, + value=corner_refinement_method_text, + allowable_values=get_args(ArucoOpenCVCommon.CornerRefinementMethod))) + + return_value.append(KeyValueMetaInt( + key=_KEY_CORNER_REFINEMENT_WIN_SIZE, + value=detection_parameters.cornerRefinementWinSize, + range_minimum=1, + range_maximum=9)) + + return_value.append(KeyValueMetaInt( + key=_KEY_CORNER_REFINEMENT_MAX_ITERATIONS, + value=detection_parameters.cornerRefinementMaxIterations, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_CORNER_REFINEMENT_MIN_ACCURACY, + value=detection_parameters.cornerRefinementMinAccuracy, + range_minimum=0.0, + range_maximum=5.0, + range_step=0.1)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_CRITICAL_RAD, + value=detection_parameters.aprilTagCriticalRad, + range_minimum=-0.0, + range_maximum=numpy.pi, + range_step=numpy.pi / 20.0)) + + return_value.append(KeyValueMetaBool( + key=_KEY_APRIL_TAG_DEGLITCH, + value=detection_parameters.aprilTagDeglitch)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_MAX_LINE_FIT_MSE, + value=detection_parameters.aprilTagMaxLineFitMse, + range_minimum=0.0, + range_maximum=512.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MAX_N_MAXIMA, + value=detection_parameters.aprilTagMaxNmaxima, + range_minimum=1, + range_maximum=100)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MIN_CLUSTER_PIXELS, + value=detection_parameters.aprilTagMinClusterPixels, + range_minimum=0, + range_maximum=512)) + + return_value.append(KeyValueMetaInt( + key=_KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF, + value=detection_parameters.aprilTagMinWhiteBlackDiff, + range_minimum=0, + range_maximum=256)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_QUAD_DECIMATE, + value=detection_parameters.aprilTagQuadDecimate, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_APRIL_TAG_QUAD_SIGMA, + 
value=detection_parameters.aprilTagQuadSigma, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + # Note: a relatively recent addition to OpenCV, may not be available in some python versions + if hasattr(detection_parameters, "useAruco3Detection"): + return_value.append(KeyValueMetaBool( + key=_KEY_USE_ARUCO_3_DETECTION, + value=detection_parameters.useAruco3Detection)) + + return_value.append(KeyValueMetaFloat( + key=_KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG, + value=detection_parameters.minMarkerLengthRatioOriginalImg, + range_minimum=0.0, + range_maximum=1.0, + range_step=0.01)) + + return_value.append(KeyValueMetaInt( + key=_KEY_MIN_SIDE_LENGTH_CANONICAL_IMG, + value=detection_parameters.minSideLengthCanonicalImg, + range_minimum=1, + range_maximum=512)) + + return return_value + + @staticmethod + def assign_key_value_list_to_aruco_detection_parameters( + detection_parameters: ..., # cv2.aruco.DetectionParameters + key_value_list: list[KeyValueSimpleAny] + ) -> list[str]: + """ + Returns list of mismatched keys + """ + mismatched_keys: list[str] = list() + key_value: KeyValueSimpleAbstract + for key_value in key_value_list: + if key_value.key == _KEY_ADAPTIVE_THRESH_WIN_SIZE_MIN: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMin = key_value.value + elif key_value.key == _KEY_ADAPTIVE_THRESH_WIN_SIZE_MAX: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeMax = key_value.value + elif key_value.key == _KEY_ADAPTIVE_THRESH_WIN_SIZE_STEP: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshWinSizeStep = key_value.value + elif key_value.key == _KEY_ADAPTIVE_THRESH_CONSTANT: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.adaptiveThreshConstant = key_value.value + elif key_value.key == _KEY_MIN_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerPerimeterRate = key_value.value + elif key_value.key == _KEY_MAX_MARKER_PERIMETER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxMarkerPerimeterRate = key_value.value + elif key_value.key == _KEY_POLYGONAL_APPROX_ACCURACY_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.polygonalApproxAccuracyRate = key_value.value + elif key_value.key == _KEY_MIN_CORNER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minCornerDistanceRate = key_value.value + elif key_value.key == _KEY_MIN_MARKER_DISTANCE_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerDistanceRate = key_value.value + elif key_value.key == _KEY_MIN_DISTANCE_TO_BORDER: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minDistanceToBorder = key_value.value + elif key_value.key == _KEY_MARKER_BORDER_BITS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + 
continue + detection_parameters.markerBorderBits = key_value.value + elif key_value.key == _KEY_MIN_OTSU_STDDEV: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minOtsuStdDev = key_value.value + elif key_value.key == _KEY_PERSPECTIVE_REMOVE_PIXEL_PER_CELL: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemovePixelPerCell = key_value.value + elif key_value.key == _KEY_PERSPECTIVE_REMOVE_IGNORED_MARGIN_PER_CELL: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.perspectiveRemoveIgnoredMarginPerCell = key_value.value + elif key_value.key == _KEY_MAX_ERRONEOUS_BITS_IN_BORDER_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.maxErroneousBitsInBorderRate = key_value.value + elif key_value.key == _KEY_ERROR_CORRECTION_RATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.errorCorrectionRate = key_value.value + elif key_value.key == _KEY_DETECT_INVERTED_MARKER: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.detectInvertedMarker = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_METHOD: + if not isinstance(key_value, KeyValueSimpleString): + mismatched_keys.append(key_value.key) + continue + corner_refinement_method: str = key_value.value + if corner_refinement_method in ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT: + # noinspection PyTypeChecker + detection_parameters.cornerRefinementMethod = \ + ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_DICTIONARY_TEXT_TO_INT[ + corner_refinement_method] + else: + raise MCTSerializationError( + message=f"Failed to find corner refinement method {corner_refinement_method}.") + elif key_value.key == _KEY_CORNER_REFINEMENT_WIN_SIZE: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementWinSize = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_MAX_ITERATIONS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMaxIterations = key_value.value + elif key_value.key == _KEY_CORNER_REFINEMENT_MIN_ACCURACY: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.cornerRefinementMinAccuracy = key_value.value + elif key_value.key == _KEY_APRIL_TAG_CRITICAL_RAD: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagCriticalRad = key_value.value + elif key_value.key == _KEY_APRIL_TAG_DEGLITCH: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagDeglitch = int(key_value.value) + elif key_value.key == _KEY_APRIL_TAG_MAX_LINE_FIT_MSE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMaxLineFitMse = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MAX_N_MAXIMA: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + 
detection_parameters.aprilTagMaxNmaxima = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MIN_CLUSTER_PIXELS: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinClusterPixels = key_value.value + elif key_value.key == _KEY_APRIL_TAG_MIN_WHITE_BLACK_DIFF: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagMinWhiteBlackDiff = key_value.value + elif key_value.key == _KEY_APRIL_TAG_QUAD_DECIMATE: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadDecimate = key_value.value + elif key_value.key == _KEY_APRIL_TAG_QUAD_SIGMA: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.aprilTagQuadSigma = key_value.value + elif key_value.key == _KEY_USE_ARUCO_3_DETECTION: + if not isinstance(key_value, KeyValueSimpleBool): + mismatched_keys.append(key_value.key) + continue + detection_parameters.useAruco3Detection = key_value.value + elif key_value.key == _KEY_MIN_MARKER_LENGTH_RATIO_ORIGINAL_IMG: + if not isinstance(key_value, KeyValueSimpleFloat): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minMarkerLengthRatioOriginalImg = key_value.value + elif key_value.key == _KEY_MIN_SIDE_LENGTH_CANONICAL_IMG: + if not isinstance(key_value, KeyValueSimpleInt): + mismatched_keys.append(key_value.key) + continue + detection_parameters.minSideLengthCanonicalImg = key_value.value + else: + mismatched_keys.append(key_value.key) + return mismatched_keys diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py new file mode 100644 index 0000000..091bd62 --- /dev/null +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -0,0 +1,137 @@ +from .common_aruco_opencv import ArucoOpenCVCommon +from src.common import \ + IntrinsicCalibrator, \ + MCTIntrinsicCalibrationError +from src.common.structures import \ + ImageResolution, \ + IntrinsicCalibration, \ + IntrinsicParameters +import cv2 +import cv2.aruco +import datetime +import numpy + + +class CharucoOpenCVIntrinsicCalibrator(IntrinsicCalibrator): + def _calculate_implementation( + self, + image_resolution: ImageResolution, + image_identifiers: list[str] + ) -> tuple[IntrinsicCalibration, list[str]]: # image_identifiers that were actually used in calibration + aruco_detector_parameters: ... 
= cv2.aruco.DetectorParameters() + + # mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( + # detection_parameters=aruco_detector_parameters, + # key_value_list=marker_parameters) + # if len(mismatched_keys) > 0: + # raise MCTIntrinsicCalibrationError( + # message=f"The following parameters could not be applied due to key mismatch: {str(mismatched_keys)}") + + charuco_spec = ArucoOpenCVCommon.CharucoBoard() + charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() + + all_charuco_corners = list() + all_charuco_ids = list() + used_image_identifiers: list[str] = list() + for image_identifier in image_identifiers: + image_filepath: str = self._image_filepath( + map_key=image_resolution, + image_identifier=image_identifier) + image_rgb = cv2.imread(image_filepath) + image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) + (marker_corners, marker_ids, _) = cv2.aruco.detectMarkers( + image=image_greyscale, + dictionary=charuco_spec.aruco_dictionary(), + parameters=aruco_detector_parameters) + if len(marker_corners) <= 0: + self._status_message_source.enqueue_status_message( + severity="warning", + message=f"Image {image_identifier} did not appear to contain any identifiable markers. " + f"It will be omitted from the calibration.") + continue + used_image_identifiers.append(image_identifier) + # Note: + # Marker corners are the corners of the markers, whereas + # ChArUco corners are the corners of the chessboard. + # ChArUco calibration function works with the corners of the chessboard. + _, frame_charuco_corners, frame_charuco_ids = cv2.aruco.interpolateCornersCharuco( + markerCorners=marker_corners, + markerIds=marker_ids, + image=image_greyscale, + board=charuco_board, + ) + # Algorithm requires a minimum of 4 markers + if frame_charuco_corners is not None and len(frame_charuco_corners) >= 4: + all_charuco_corners.append(frame_charuco_corners) + all_charuco_ids.append(frame_charuco_ids) + + if len(all_charuco_corners) <= 0: + raise MCTIntrinsicCalibrationError(message="The input images did not contain visible markers.") + + # outputs to be stored in these containers + calibration_result = cv2.aruco.calibrateCameraCharucoExtended( + charucoCorners=all_charuco_corners, + charucoIds=all_charuco_ids, + board=charuco_board, + imageSize=numpy.array(charuco_spec.size_mm(), dtype="int32"), # Exception if float + cameraMatrix=numpy.identity(3, dtype='f'), + distCoeffs=numpy.zeros(5, dtype='f')) + + charuco_overall_reprojection_error = calibration_result[0] + charuco_camera_matrix = calibration_result[1] + charuco_distortion_coefficients = calibration_result[2] + charuco_rotation_vectors = calibration_result[3] + charuco_translation_vectors = calibration_result[4] + charuco_intrinsic_stdevs = calibration_result[5] + charuco_extrinsic_stdevs = calibration_result[6] + charuco_reprojection_errors = calibration_result[7] + + supplemental_data: dict = { + "reprojection_error": charuco_overall_reprojection_error, + "calibrated_stdevs": [value[0] for value in charuco_intrinsic_stdevs], + # "marker_parameters": marker_parameters, + "frame_results": [{ + "image_identifier": image_identifiers[i], + "translation": [ + charuco_translation_vectors[i][0, 0], + charuco_translation_vectors[i][1, 0], + charuco_translation_vectors[i][2, 0]], + "rotation": [ + charuco_rotation_vectors[i][0, 0], + charuco_rotation_vectors[i][1, 0], + charuco_rotation_vectors[i][2, 0]], + "translation_stdev": [ + charuco_extrinsic_stdevs[i * 6 + 3, 0], + 
charuco_extrinsic_stdevs[i * 6 + 4, 0], + charuco_extrinsic_stdevs[i * 6 + 5, 0]], + "rotation_stdev": [ + charuco_extrinsic_stdevs[i * 6 + 0, 0], + charuco_extrinsic_stdevs[i * 6 + 1, 0], + charuco_extrinsic_stdevs[i * 6 + 2, 0]], + "reprojection_error": charuco_reprojection_errors[i, 0]} + for i in range(0, len(charuco_reprojection_errors))]} + + # TODO: Assertion on size of distortion coefficients being 5? + # Note: OpenCV documentation specifies the order of distortion coefficients + # https://docs.opencv.org/4.x/d9/d6a/group__aruco.html#ga366993d29fdddd995fba8c2e6ca811ea + # So far I have not seen calibration return a number of coefficients other than 5. + # Note too that there is an unchecked expectation that radial distortion be monotonic. + + intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration( + timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), + image_resolution=image_resolution, + calibrated_values=IntrinsicParameters( + focal_length_x_px=charuco_camera_matrix[0, 0], + focal_length_y_px=charuco_camera_matrix[1, 1], + optical_center_x_px=charuco_camera_matrix[0, 2], + optical_center_y_px=charuco_camera_matrix[1, 2], + radial_distortion_coefficients=[ + charuco_distortion_coefficients[0, 0], + charuco_distortion_coefficients[1, 0], + charuco_distortion_coefficients[4, 0]], + tangential_distortion_coefficients=[ + charuco_distortion_coefficients[2, 0], + charuco_distortion_coefficients[3, 0]]), + supplemental_data=supplemental_data) + + return intrinsic_calibration, used_image_identifiers diff --git a/src/pose_solver/__init__.py b/src/pose_solver/__init__.py new file mode 100644 index 0000000..efacf24 --- /dev/null +++ b/src/pose_solver/__init__.py @@ -0,0 +1,14 @@ +from .api import \ + PoseSolverAddDetectorFrameRequest, \ + PoseSolverAddTargetMarkerRequest, \ + PoseSolverAddTargetBoardRequest, \ + PoseSolverAddTargetResponse, \ + PoseSolverGetPosesRequest, \ + PoseSolverGetPosesResponse, \ + PoseSolverSetExtrinsicRequest, \ + PoseSolverSetIntrinsicRequest, \ + PoseSolverSetReferenceRequest, \ + PoseSolverSetTargetsRequest, \ + PoseSolverStartRequest, \ + PoseSolverStopRequest +from.pose_solver_api import PoseSolverAPI diff --git a/src/pose_solver/api.py b/src/pose_solver/api.py index a2c6081..a8ef348 100644 --- a/src/pose_solver/api.py +++ b/src/pose_solver/api.py @@ -16,7 +16,7 @@ class PoseSolverAddDetectorFrameRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverAddDetectorFrameRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -30,7 +30,7 @@ class PoseSolverAddTargetMarkerRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "add_target_marker" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverAddTargetMarkerRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -43,7 +43,7 @@ class PoseSolverAddTargetBoardRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "add_target_board" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverAddTargetBoardRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -56,7 +56,7 @@ class PoseSolverAddTargetResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverAddTargetResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -69,7 +69,7 @@ class 
PoseSolverGetPosesRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "get_poses" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverGetPosesRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -80,7 +80,7 @@ class PoseSolverGetPosesResponse(MCTResponse): _TYPE_IDENTIFIER: Final[str] = "get_poses" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverGetPosesResponse._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -94,7 +94,7 @@ class PoseSolverSetExtrinsicRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "set_extrinsic_parameters" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverSetExtrinsicRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -108,7 +108,7 @@ class PoseSolverSetIntrinsicRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "set_intrinsic_parameters" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverSetIntrinsicRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -122,7 +122,7 @@ class PoseSolverSetReferenceRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "set_reference_marker" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverSetReferenceRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -136,7 +136,7 @@ class PoseSolverSetTargetsRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "set_targets" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverSetTargetsRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -149,7 +149,7 @@ class PoseSolverStartRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "start_pose_solver" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverStartRequest._TYPE_IDENTIFIER # noinspection PyTypeHints @@ -160,7 +160,7 @@ class PoseSolverStopRequest(MCTRequest): _TYPE_IDENTIFIER: Final[str] = "stop_pose_solver" @staticmethod - def parsable_type_identifier() -> str: + def type_identifier() -> str: return PoseSolverStopRequest._TYPE_IDENTIFIER # noinspection PyTypeHints diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 6903eeb..72d09c3 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -97,14 +97,14 @@ def add_target( target: TargetBase ) -> None: for existing_target in self._targets: - if target.target_id == existing_target.target_id: + if target.label == existing_target.label: raise PoseSolverException( - f"Target with name {target.target_id} is already registered. " + f"Target with name {target.label} is already registered. 
" f"Please use a different name, and also make sure you are not adding the same target twice.") marker_ids = target.get_marker_ids() for marker_id in marker_ids: if marker_id in self._marker_target_map: - target_id: str = self._marker_target_map[marker_id].target_id + target_id: str = self._marker_target_map[marker_id].label raise PoseSolverException( f"Marker {marker_id} is already used with target {target_id} and it cannot be reused.") target_index = len(self._targets) @@ -171,7 +171,7 @@ def set_reference_target( ) -> None: found: bool = False for target_index, target in enumerate(self._targets): - if target.target_id == target_id: + if target.label == target_id: self._targets[0], self._targets[target_index] = self._targets[target_index], self._targets[0] self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) found = True @@ -401,7 +401,7 @@ def update(self) -> None: # We estimate the pose of each target based on the calculated intersections # and the rays projected from each detector for target in self._targets: - if target.target_id == str(reference_target.target_id): + if target.label == str(reference_target.label): continue # everything is expressed relative to the reference... detected_marker_ids_in_target: list[str] = target.get_marker_ids() @@ -436,7 +436,7 @@ def update(self) -> None: detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector - self._poses_by_target_id[target.target_id] = Matrix4x4.from_numpy_array(detected_to_reference) + self._poses_by_target_id[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) else: # Fill in the required variables for the customized iterative closest point detected_known_points: list[list[float]] = list(itertools.chain.from_iterable([ @@ -475,4 +475,4 @@ def update(self) -> None: source_ray_points=detected_ray_points, target_rays=reference_rays, parameters=iterative_closest_point_parameters) - self._poses_by_target_id[target.target_id] = icp_output.source_to_target_matrix + self._poses_by_target_id[target.label] = icp_output.source_to_target_matrix diff --git a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py index 806ffd5..ec0f709 100644 --- a/src/pose_solver/pose_solver_api.py +++ b/src/pose_solver/pose_solver_api.py @@ -21,20 +21,38 @@ MCTResponse, \ PythonUtils from src.common.structures import \ - Pose, \ - PoseSolverStatus + Pose +from enum import StrEnum import logging -from typing import Callable +from typing import Callable, Final logger = logging.getLogger(__name__) +_ROLE_LABEL: Final[str] = "pose_solver" + + class PoseSolverAPI(MCTComponent): """ API-friendly layer overtop of a PoseSolver """ - _status: PoseSolverStatus + + class Status: + + class Solve(StrEnum): + STOPPED: Final[int] = "stopped" + RUNNING: Final[int] = "running" + FAILURE: Final[int] = "failure" + + solve_status: Solve + solve_errors: list[str] + + def __init__(self): + self.solve_status = PoseSolverAPI.Status.Solve.STOPPED + self.solve_errors = list() + + _status: Status _pose_solver: PoseSolver def __init__( @@ -46,7 +64,7 @@ def __init__( status_source_label=configuration.serial_identifier, send_status_messages_to_logger=True) self._pose_solver = pose_solver - self._status = PoseSolverStatus() + self._status = PoseSolverAPI.Status() def add_detector_frame(self, **kwargs) -> 
EmptyResponse | ErrorResponse: request: PoseSolverAddDetectorFrameRequest = PythonUtils.get_kwarg( @@ -83,6 +101,10 @@ def get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: detector_poses=detector_poses, target_poses=target_poses) + @staticmethod + def get_role_label(): + return _ROLE_LABEL + def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: request: PoseSolverSetExtrinsicRequest = PythonUtils.get_kwarg( kwargs=kwargs, @@ -132,11 +154,11 @@ def set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def start_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverStatus.Solve.RUNNING + self._status.solve_status = PoseSolverAPI.Status.Solve.RUNNING return EmptyResponse() def stop_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverStatus.Solve.STOPPED + self._status.solve_status = PoseSolverAPI.Status.Solve.STOPPED return EmptyResponse() def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: @@ -156,5 +178,5 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT async def update(self): if self.time_sync_active: return - if self._status.solve_status == PoseSolverStatus.Solve.RUNNING: + if self._status.solve_status == PoseSolverAPI.Status.Solve.RUNNING: self._pose_solver.update() diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py index 8626072..3032f92 100644 --- a/src/pose_solver/structures.py +++ b/src/pose_solver/structures.py @@ -1,4 +1,5 @@ from src.common.structures import \ + Annotation, \ DetectorFrame import cv2.aruco import datetime @@ -23,10 +24,15 @@ def __init__( def _init_corners_by_marker_id(self): self._corners_by_marker_id = dict() - for snapshot in self._frame.detected_marker_snapshots: - self._corners_by_marker_id[snapshot.label] = [ - [corner_image_point.x_px, corner_image_point.y_px] - for corner_image_point in snapshot.corner_image_points] + annotations: list[Annotation] = self._frame.annotations_identified + for annotation in annotations: + base_label: str = annotation.base_label() + if base_label in self._corners_by_marker_id.keys(): + continue + self._corners_by_marker_id[base_label] = [ + [annotation.x_px, annotation.y_px] + for annotation in annotations + if annotation.base_label() == base_label] def get_detector_label(self) -> str: return self._detector_label @@ -51,7 +57,7 @@ def get_marker_ids_detected(self) -> list[str]: def get_timestamp_utc(self): if self._timestamp_utc is None: - self._timestamp_utc = self._frame.timestamp_utc() + self._timestamp_utc = self._frame.timestamp_utc return self._timestamp_utc diff --git a/src/slicer_connection.py b/src/slicer_connection.py index 25caa39..359665f 100644 --- a/src/slicer_connection.py +++ b/src/slicer_connection.py @@ -1,22 +1,11 @@ -import asyncio import sys import hjson -import numpy as np import pyigtl import time as t import logging -from src.common.api import MCTRequestSeries -from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER -from src.common.structures import TargetBase from src.controller.mct_controller import MCTController -from ipaddress import IPv4Address -from src.pose_solver.api import PoseSolverAddTargetMarkerRequest -from src.pose_solver.api import TargetMarker -from src.pose_solver.api import PoseSolverSetReferenceRequest - -from src.controller import Connection # Input filepath is specified by command line 
arguments if len(sys.argv) < 2: diff --git a/src/util/generate_target_definition_from_charuco.py b/src/util/generate_target_definition_from_charuco.py index 9669876..8815921 100644 --- a/src/util/generate_target_definition_from_charuco.py +++ b/src/util/generate_target_definition_from_charuco.py @@ -1,10 +1,10 @@ +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon from src.common.structures import \ - CharucoBoardSpecification, \ TargetBoard from src.common.structures import Marker -board: CharucoBoardSpecification = CharucoBoardSpecification() +board: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() points: list[list[float]] = board.get_marker_corner_points() markers: list[Marker] = list() POINTS_PER_MARKER: int = 4 diff --git a/src/util/measure_detector_to_reference.py b/src/util/measure_detector_to_reference.py index f24dd3d..c616964 100644 --- a/src/util/measure_detector_to_reference.py +++ b/src/util/measure_detector_to_reference.py @@ -1,19 +1,17 @@ import asyncio import sys import hjson -from ipaddress import IPv4Address import json import logging import numpy as np import os from time import sleep -from timeit import main from scipy.spatial.transform import Rotation as R from src.board_builder.board_builder import BoardBuilder -from src.common.structures.mct_component import COMPONENT_ROLE_LABEL_DETECTOR, COMPONENT_ROLE_LABEL_POSE_SOLVER from src.controller import Connection, MCTController from src.common import MathUtils +from src.detector import Detector # input_filepath = "/home/adminpi5/Documents/MCSTrack/data/measure_detector_to_reference_config.json" if len(sys.argv) < 2: @@ -42,7 +40,7 @@ async def main(): for detector in detectors: controller.add_connection(Connection.ComponentAddress( label=detector['label'], - role=COMPONENT_ROLE_LABEL_DETECTOR, + role=Detector.get_role_label(), ip_address=detector['ip_address'], port=detector['port'])) all_measured_transforms_by_detector[detector['label']] = [] @@ -67,7 +65,7 @@ async def main(): controller.update() frame = controller.get_live_detector_frame(detector_label) - detectors_and_their_frames[detector_label] = frame.detected_marker_snapshots + detectors_and_their_frames[detector_label] = frame.annotations_identified board_builder.locate_reference_board(detectors_and_their_frames) diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index a84e1be..5a047d8 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -2,19 +2,21 @@ import numpy import os import re -from src.common import ImageUtils, StatusMessageSource +from src.common import \ + ImageUtils, \ + IntrinsicCalibrator, \ + StatusMessageSource from src.common.structures import \ - CORNER_REFINEMENT_METHOD_SUBPIX, \ ImageResolution, \ - IntrinsicCalibration, \ KeyValueSimpleAny, \ KeyValueSimpleString, \ - MarkerSnapshot -from src.detector import \ - IntrinsicCalibrator + Annotation +from src.implementations.common_aruco_opencv import \ + ArucoOpenCVCommon from src.implementations.annotator_aruco_opencv import \ - ArucoOpenCVAnnotator, \ - KEY_CORNER_REFINEMENT_METHOD + ArucoOpenCVAnnotator +from src.implementations.intrinsic_charuco_opencv import \ + CharucoOpenCVIntrinsicCalibrator from tempfile import TemporaryDirectory from typing import Final import unittest @@ -26,8 +28,8 @@ IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) MARKER_DETECTION_PARAMETERS: list[KeyValueSimpleAny] = [ KeyValueSimpleString( - 
key=KEY_CORNER_REFINEMENT_METHOD, - value=CORNER_REFINEMENT_METHOD_SUBPIX)] + key=ArucoOpenCVCommon.KEY_CORNER_REFINEMENT_METHOD, + value=ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_SUBPIX)] class TestPoseSolver(unittest.TestCase): @@ -67,9 +69,9 @@ def test(self): # To simplify our lives and ensure a reasonable result, # we'll calibrate all cameras with the same set of input images. # We'll use all images from the A# and B# sets of frames. - calibration_result: IntrinsicCalibration | None = None + calibration_result: CharucoOpenCVIntrinsicCalibrator | None with TemporaryDirectory() as temppath: - calibrator: IntrinsicCalibrator = IntrinsicCalibrator( + calibrator: CharucoOpenCVIntrinsicCalibrator = CharucoOpenCVIntrinsicCalibrator( configuration=IntrinsicCalibrator.Configuration(data_path=temppath), status_message_source=status_message_source) for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): @@ -79,15 +81,13 @@ def test(self): image: numpy.ndarray = cv2.imread(image_filepath) image_base64: str = ImageUtils.image_to_base64(image) calibrator.add_image(image_base64) - _, calibration_result = calibrator.calculate( - image_resolution=IMAGE_RESOLUTION, - marker_parameters=MARKER_DETECTION_PARAMETERS) + _, calibration_result = calibrator.calculate(image_resolution=IMAGE_RESOLUTION) marker: ArucoOpenCVAnnotator = ArucoOpenCVAnnotator( configuration={"method": "aruco_opencv"}, status_message_source=status_message_source) marker.set_parameters(parameters=MARKER_DETECTION_PARAMETERS) - image_marker_snapshots: dict[str, dict[str, list[MarkerSnapshot]]] = dict() + image_marker_snapshots: dict[str, dict[str, list[Annotation]]] = dict() detection_count: int = 0 for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): for frame_id, image_filepath in image_filepaths_by_frame_id.items(): @@ -95,7 +95,7 @@ def test(self): image_marker_snapshots[camera_id] = dict() image: numpy.ndarray = cv2.imread(image_filepath) marker.update(image) - marker_snapshots: list[MarkerSnapshot] = marker.get_markers_detected() + marker_snapshots: list[Annotation] = marker.get_markers_detected() image_marker_snapshots[camera_id][frame_id] = marker_snapshots detection_count += len(marker_snapshots) message = f"{detection_count} detections." 
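The test above drives CharucoOpenCVIntrinsicCalibrator end to end. For reference, a minimal standalone sketch of the underlying ChArUco intrinsic-calibration flow (detect markers, interpolate chessboard corners, calibrate) follows, assuming an OpenCV build (roughly 4.7/4.8) in which the cv2.aruco.CharucoBoard constructor and the legacy detectMarkers / interpolateCornersCharuco / calibrateCameraCharuco helpers coexist; the board geometry and image paths are placeholders, not the project's configuration.

import cv2
import cv2.aruco

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
# 5x7 squares, 30 mm squares with 22 mm markers (placeholder geometry).
board = cv2.aruco.CharucoBoard((5, 7), 30.0, 22.0, dictionary)

all_corners, all_ids, image_size = [], [], None
for filepath in ["frame_0.png", "frame_1.png"]:  # placeholder image paths
    image = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    if image is None:
        continue
    image_size = (image.shape[1], image.shape[0])
    marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(image=image, dictionary=dictionary)
    if marker_ids is None or len(marker_corners) == 0:
        continue
    # Interpolate chessboard (ChArUco) corners from the detected marker corners.
    _, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
        markerCorners=marker_corners, markerIds=marker_ids, image=image, board=board)
    if charuco_corners is not None and len(charuco_corners) >= 4:
        all_corners.append(charuco_corners)
        all_ids.append(charuco_ids)

if all_corners:
    reprojection_error, camera_matrix, distortion, _, _ = cv2.aruco.calibrateCameraCharuco(
        charucoCorners=all_corners, charucoIds=all_ids, board=board,
        imageSize=image_size, cameraMatrix=None, distCoeffs=None)
    # camera_matrix holds fx, fy, cx, cy; distortion is ordered [k1, k2, p1, p2, k3].

The [k1, k2, p1, p2, k3] ordering of the distortion vector is what motivates splitting the radial coefficients (indices 0, 1, 4) from the tangential ones (indices 2, 3) when the calibrator builds IntrinsicParameters.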
diff --git a/test/test_math_utils.py b/test/test_math_utils.py index 4b86ba0..4bb1406 100644 --- a/test/test_math_utils.py +++ b/test/test_math_utils.py @@ -63,7 +63,7 @@ def test_iterative_closest_point(self) -> None: Ray(source_point=origin, direction=[sqrt3, sqrt3, -sqrt3]), Ray(source_point=origin, direction=[sqrt3, -sqrt3, -sqrt3]), Ray(source_point=origin, direction=[-sqrt3, -sqrt3, -sqrt3])] - begin_datetime = datetime.datetime.utcnow() + begin_datetime = datetime.datetime.now(tz=datetime.timezone.utc) icp_parameters = IterativeClosestPointParameters( termination_iteration_count=100, termination_delta_translation=0.001, @@ -77,7 +77,7 @@ def test_iterative_closest_point(self) -> None: target_rays=target_rays, parameters=icp_parameters) source_to_target_matrix = icp_output.source_to_target_matrix.as_numpy_array() - end_datetime = datetime.datetime.utcnow() + end_datetime = datetime.datetime.now(tz=datetime.timezone.utc) duration = (end_datetime - begin_datetime) duration_seconds = duration.seconds + (duration.microseconds / 1000000.0) message = str() diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index c8ddb1d..f39d777 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -3,10 +3,10 @@ DetectorFrame, \ ImageResolution, \ IntrinsicParameters, \ - MarkerCornerImagePoint, \ - MarkerSnapshot, \ + Annotation, \ Matrix4x4, \ Pose, \ + RELATION_CHARACTER, \ TargetMarker import datetime from typing import Final @@ -18,13 +18,13 @@ REFERENCE_TARGET_ID: Final[str] = "reference" REFERENCE_MARKER_ID: Final[str] = "0" REFERENCE_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - target_id=REFERENCE_TARGET_ID, + label=REFERENCE_TARGET_ID, marker_id=REFERENCE_MARKER_ID, marker_size=MARKER_SIZE_MM) TARGET_TARGET_ID: Final[str] = "target" TARGET_MARKER_ID: Final[str] = "1" TARGET_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - target_id=TARGET_TARGET_ID, + label=TARGET_TARGET_ID, marker_id=TARGET_MARKER_ID, marker_size=MARKER_SIZE_MM) DETECTOR_RED_NAME: Final[str] = "det_red" @@ -102,34 +102,27 @@ def test_single_camera_viewing_target_marker(self): # Note that single-marker tests are particularly susceptible to reference pose ambiguity now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() - # TODO: The following line shall be replaced upon implementation of an appropriate alternative + # TODO: The following line shall be replaced upon implementation of an appropriate mechanism pose_solver._parameters.minimum_detector_count = 1 pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, intrinsic_parameters=DETECTOR_RED_INTRINSICS) pose_solver.add_target(target=REFERENCE_MARKER_TARGET) pose_solver.add_target(target=TARGET_MARKER_TARGET) - pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.target_id) + pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.label) # Reference is on the left, target is on the right, both in the same plane and along the x-axis of the image. 
pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=375, y_px=347), - MarkerCornerImagePoint(x_px=415, y_px=346), - MarkerCornerImagePoint(x_px=416, y_px=386), - MarkerCornerImagePoint(x_px=376, y_px=386)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=541, y_px=347), - MarkerCornerImagePoint(x_px=581, y_px=348), - MarkerCornerImagePoint(x_px=580, y_px=388), - MarkerCornerImagePoint(x_px=540, y_px=387)])], - rejected_marker_snapshots=list(), + annotations=[ + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=375, y_px=347), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=415, y_px=346), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=416, y_px=386), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=376, y_px=386), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=541, y_px=347), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=581, y_px=348), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=580, y_px=388), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=540, y_px=387)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.update() @@ -181,89 +174,61 @@ def test_four_cameras_viewing_target_marker(self): intrinsic_parameters=DETECTOR_YELLOW_INTRINSICS) pose_solver.add_target(target=REFERENCE_MARKER_TARGET) pose_solver.add_target(target=TARGET_MARKER_TARGET) - pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.target_id) + pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.label) pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=157, y_px=210), - MarkerCornerImagePoint(x_px=165, y_px=221), - MarkerCornerImagePoint(x_px=139, y_px=229), - MarkerCornerImagePoint(x_px=131, y_px=217)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=196, y_px=266), - MarkerCornerImagePoint(x_px=206, y_px=281), - MarkerCornerImagePoint(x_px=178, y_px=291), - MarkerCornerImagePoint(x_px=167, y_px=275)])], - rejected_marker_snapshots=list(), + annotations=[ + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=157, y_px=210), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=165, y_px=221), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=139, y_px=229), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=131, y_px=217), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=196, y_px=266), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=206, y_px=281), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=178, y_px=291), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=167, y_px=275)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_SKY_NAME, detector_frame=DetectorFrame( - detected_marker_snapshots=[ - 
MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=190, y_px=234), - MarkerCornerImagePoint(x_px=219, y_px=246), - MarkerCornerImagePoint(x_px=195, y_px=270), - MarkerCornerImagePoint(x_px=166, y_px=257)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=317, y_px=290), - MarkerCornerImagePoint(x_px=352, y_px=306), - MarkerCornerImagePoint(x_px=332, y_px=333), - MarkerCornerImagePoint(x_px=296, y_px=317)])], - rejected_marker_snapshots=list(), + annotations=[ + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=190, y_px=234), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=219, y_px=246), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=195, y_px=270), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=166, y_px=257), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=317, y_px=290), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=352, y_px=306), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=332, y_px=333), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=296, y_px=317)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_GREEN_NAME, detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=247, y_px=304), - MarkerCornerImagePoint(x_px=283, y_px=296), - MarkerCornerImagePoint(x_px=291, y_px=326), - MarkerCornerImagePoint(x_px=254, y_px=334)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=392, y_px=277), - MarkerCornerImagePoint(x_px=426, y_px=271), - MarkerCornerImagePoint(x_px=438, y_px=299), - MarkerCornerImagePoint(x_px=403, y_px=305)])], - rejected_marker_snapshots=list(), + annotations=[ + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=247, y_px=304), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=283, y_px=296), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=291, y_px=326), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=254, y_px=334), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=392, y_px=277), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=426, y_px=271), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=438, y_px=299), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=403, y_px=305)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_YELLOW_NAME, detector_frame=DetectorFrame( - detected_marker_snapshots=[ - MarkerSnapshot( - label=str(REFERENCE_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=275, y_px=277), - MarkerCornerImagePoint(x_px=289, y_px=251), - MarkerCornerImagePoint(x_px=321, y_px=261), - MarkerCornerImagePoint(x_px=306, y_px=288)]), - MarkerSnapshot( - label=str(TARGET_MARKER_ID), - corner_image_points=[ - MarkerCornerImagePoint(x_px=332, y_px=177), - MarkerCornerImagePoint(x_px=344, y_px=156), - MarkerCornerImagePoint(x_px=372, y_px=163), - MarkerCornerImagePoint(x_px=361, y_px=185)])], - 
rejected_marker_snapshots=list(), + annotations=[ + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=275, y_px=277), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=289, y_px=251), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=321, y_px=261), + Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=306, y_px=288), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=332, y_px=177), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=344, y_px=156), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=372, y_px=163), + Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=361, y_px=185)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.update() From bc3f66ba33e945123904881aed45f9c09e983364 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 15 Jul 2025 10:12:49 -0400 Subject: [PATCH 08/33] WIP: Continued consolidation --- src/common/__init__.py | 4 +-- src/common/intrinsic_calibrator.py | 49 +++++++++++++------------- src/common/mct_component.py | 34 +++++++++++++++--- src/common/status_messages.py | 29 +++++++-------- src/common/structures/__init__.py | 1 - src/common/structures/serialization.py | 6 ---- src/common/util/__init__.py | 2 -- src/common/util/io_utils.py | 6 ++-- src/common/util/network_utils.py | 17 --------- src/common/util/python_utils.py | 42 ---------------------- src/detector/detector.py | 25 +++++++------ src/detector/detector_app.py | 14 +++----- src/pose_solver/pose_solver_api.py | 15 ++++---- 13 files changed, 93 insertions(+), 151 deletions(-) delete mode 100644 src/common/util/network_utils.py delete mode 100644 src/common/util/python_utils.py diff --git a/src/common/__init__.py b/src/common/__init__.py index 230fe4c..6493d22 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -30,6 +30,4 @@ from .util import \ ImageUtils, \ IOUtils, \ - MathUtils, \ - NetworkUtils, \ - PythonUtils + MathUtils diff --git a/src/common/intrinsic_calibrator.py b/src/common/intrinsic_calibrator.py index 8d2492f..bfa9c45 100644 --- a/src/common/intrinsic_calibrator.py +++ b/src/common/intrinsic_calibrator.py @@ -1,9 +1,8 @@ from .exceptions import MCTError -from .status_messages import StatusMessageSource +from .status_messages import SeverityLabel, StatusMessageSource from .structures import \ ImageResolution, \ - IntrinsicCalibration, \ - KeyValueSimpleAny + IntrinsicCalibration from .util import \ ImageUtils, \ IOUtils @@ -36,9 +35,9 @@ class _Configuration(BaseModel): class _ImageState(StrEnum): - IGNORE: Final[int] = "ignore" - SELECT: Final[int] = "select" - DELETE: Final[int] = "delete" # stage for deletion + IGNORE = "ignore" + SELECT = "select" + DELETE = "delete" # stage for deletion class _ImageMetadata(BaseModel): @@ -51,13 +50,13 @@ class _ImageMetadata(BaseModel): class _ResultState(StrEnum): # indicate to use this calibration (as opposed to simply storing it) # normally there shall only ever be one ACTIVE calibration for a given image resolution - ACTIVE: Final[str] = "active" + ACTIVE = "active" # store the calibration, but don't mark it for use - RETAIN: Final[str] = "retain" + RETAIN = "retain" # stage for deletion - DELETE: Final[str] = "delete" + DELETE = "delete" class _ResultMetadata(BaseModel): @@ -133,7 +132,7 @@ def __init__( self._status_message_source = status_message_source if not 
self._exists_on_filesystem(path=self._configuration.data_path, pathtype="path", create_path=True): self._status_message_source.enqueue_status_message( - severity="critical", + severity=SeverityLabel.CRITICAL, message="Data path does not exist and could not be created.") detailed_message: str = f"{self._configuration.data_path} does not exist and could not be created." logger.critical(detailed_message) @@ -143,7 +142,7 @@ def __init__( "In order to avoid data loss, the software will now abort. " \ "Please manually correct or remove the file in the filesystem." logger.critical(message) - self._status_message_source.enqueue_status_message(severity="critical", message=message) + self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) raise RuntimeError(message) def add_image( @@ -205,7 +204,7 @@ def calculate( image_identifier=image_metadata.identifier) if not self._exists_on_filesystem(path=image_filepath, pathtype="filepath"): self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Image {image_metadata.identifier} was not found. " f"It will be omitted from the calibration.") continue @@ -219,7 +218,7 @@ def calculate( filepath=result_filepath, json_dict=intrinsic_calibration.model_dump(), on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=msg), on_error_for_dev=logger.error, ignore_none=True) @@ -246,13 +245,13 @@ def _delete_if_exists(self, filepath: str): except FileNotFoundError as e: logger.error(e) self._status_message_source.enqueue_status_message( - severity="warning", # It *is* an internal error, but it has few consequences for user... so warning + severity=SeverityLabel.ERROR, message=f"Failed to remove a file from the calibrator because it does not exist. " f"See its internal log for details.") except OSError as e: logger.error(e) self._status_message_source.enqueue_status_message( - severity="warning", # It *is* an internal error, but it has few consequences for user... so warning + severity=SeverityLabel.ERROR, message=f"Failed to remove a file from the calibrator due to an unexpected reason. " f"See its internal log for details.") @@ -290,7 +289,7 @@ def _exists_on_filesystem( pathtype=pathtype, create_path=create_path, on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=msg), on_error_for_dev=logger.error) @@ -392,12 +391,12 @@ def get_result_active( if active_count < 1: self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"No result metadata is active for resolution {str(image_resolution)}." "Returning latest result.") elif active_count > 1: self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Multiple result metadata are active for resolution {str(image_resolution)}. " "Returning latest active result. " "To recover from this ambiguous state, it is strong recommended to explicitly set " @@ -484,7 +483,7 @@ def load(self) -> bool: elif not os.path.isfile(calibration_map_filepath): logger.critical(f"Calibration map file location {calibration_map_filepath} exists but is not a file.") self._status_message_source.enqueue_status_message( - severity="critical", + severity=SeverityLabel.CRITICAL, message="Filepath location for calibration map exists but is not a file. 
" "Most likely a directory exists at that location, " "and it needs to be manually removed.") @@ -492,13 +491,13 @@ def load(self) -> bool: json_dict: dict = IOUtils.hjson_read( filepath=calibration_map_filepath, on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=msg), on_error_for_dev=logger.error) if not json_dict: logger.error(f"Failed to load calibration map from file {calibration_map_filepath}.") self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message="Failed to load calibration map from file.") return False calibration_map: IntrinsicCalibrator.DataMap @@ -507,7 +506,7 @@ def load(self) -> bool: except ValidationError as e: logger.error(e) self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message="Failed to parse calibration map from file.") return False self._calibration_map = calibration_map.as_dict() @@ -537,7 +536,7 @@ def save(self) -> None: filepath=self._map_filepath(), json_dict=IntrinsicCalibrator.DataMap.from_dict(self._calibration_map).model_dump(), on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=msg), on_error_for_dev=logger.error) @@ -562,7 +561,7 @@ def update_image_metadata( message=f"Image identifier {image_identifier} is not associated with any image.") elif found_count > 1: self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Image identifier {image_identifier} is associated with multiple images.") self.save() @@ -599,7 +598,7 @@ def update_result_metadata( message=f"Result identifier {result_identifier} is not associated with any result.") elif found_count > 1: self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Result identifier {result_identifier} is associated with multiple results. " "This suggests that the calibration map is in an inconsistent state. 
" "It may be prudent to either manually correct it, or recreate it.") diff --git a/src/common/mct_component.py b/src/common/mct_component.py index 44d566d..299e42d 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -18,8 +18,6 @@ from .structures import \ MCTDeserializable, \ MCTSerializationError -from .util import \ - PythonUtils import abc import datetime from fastapi import WebSocket, WebSocketDisconnect @@ -31,6 +29,7 @@ SerializableSingle = TypeVar('SerializableSingle', bound=MCTDeserializable) +T = TypeVar("T") class MCTComponent(abc.ABC): @@ -86,7 +85,7 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: """ :key client_identifier: str """ - client_identifier: str = PythonUtils.get_kwarg( + client_identifier: str = self.get_kwarg( kwargs=kwargs, key="client_identifier", arg_type=str) @@ -95,6 +94,33 @@ def dequeue_status_messages(self, **kwargs) -> DequeueStatusMessagesResponse: return DequeueStatusMessagesResponse( status_messages=status_messages) + @staticmethod + def get_kwarg( + kwargs: dict, + key: str, + arg_type: type[T], + required: bool = True + ) -> T | None: + """ + :param kwargs: kwargs as a dict (without the "**") + :param key: key to search for + :param arg_type: expected type + :param required: If the keyword does not exist, then: + required == True -> Raise ValueError + required == False -> Return None + """ + + if key not in kwargs: + if required: + raise ValueError(f"Missing required key {key} in keyword arguments.") + return None + value: T = kwargs[key] + if not isinstance(value, arg_type): + raise ValueError( + f"Expected keyword argument {key} to be of type {arg_type.__name__}, " + f"but got {type(value).__name__}.") + return value + @staticmethod @abc.abstractmethod def get_role_label(): @@ -119,7 +145,7 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT TimeSyncStopRequest: self.time_sync_stop} def timestamp_get(self, **kwargs) -> TimestampGetResponse: - request: TimestampGetRequest = PythonUtils.get_kwarg( + request: TimestampGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=TimestampGetRequest) diff --git a/src/common/status_messages.py b/src/common/status_messages.py index a358dc8..1350c6c 100644 --- a/src/common/status_messages.py +++ b/src/common/status_messages.py @@ -8,25 +8,20 @@ logger = logging.getLogger(__name__) -SEVERITY_LABEL_DEBUG: Final[str] = "debug" -SEVERITY_LABEL_INFO: Final[str] = "info" -SEVERITY_LABEL_WARNING: Final[str] = "warning" -SEVERITY_LABEL_ERROR: Final[str] = "error" -SEVERITY_LABEL_CRITICAL: Final[str] = "critical" class SeverityLabel(StrEnum): - DEBUG: Final[str] = SEVERITY_LABEL_DEBUG - INFO: Final[str] = SEVERITY_LABEL_INFO - WARNING: Final[str] = SEVERITY_LABEL_WARNING - ERROR: Final[str] = SEVERITY_LABEL_ERROR - CRITICAL: Final[str] = SEVERITY_LABEL_CRITICAL + DEBUG = "debug" + INFO = "info" + WARNING = "warning" + ERROR = "error" + CRITICAL = "critical" SEVERITY_LABEL_TO_INT: Final[dict[SeverityLabel, int]] = { - "debug": logging.DEBUG, - "info": logging.INFO, - "warning": logging.WARNING, - "error": logging.ERROR, - "critical": logging.CRITICAL} + SeverityLabel.DEBUG: logging.DEBUG, + SeverityLabel.INFO: logging.INFO, + SeverityLabel.WARNING: logging.WARNING, + SeverityLabel.ERROR: logging.ERROR, + SeverityLabel.CRITICAL: logging.CRITICAL} class StatusMessage(BaseModel): @@ -66,13 +61,13 @@ def add_status_subscriber( message: str = f"{subscriber_label} is now listening for status messages." 
self.enqueue_status_message( source_label=self._source_label, - severity="debug", + severity=SeverityLabel.DEBUG, message=message) else: message: str = f"{subscriber_label} is already in status message outboxes." self.enqueue_status_message( source_label=self._source_label, - severity="error", + severity=SeverityLabel.ERROR, message=message) def get_source_label(self) -> str: diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index 4f759b8..650c134 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -25,7 +25,6 @@ KeyValueMetaEnum, \ KeyValueMetaFloat, \ KeyValueMetaInt, \ - key_value_meta_to_simple, \ MCTSerializationError, \ MCTDeserializable from .tracking import \ diff --git a/src/common/structures/serialization.py b/src/common/structures/serialization.py index ee873e0..01a7415 100644 --- a/src/common/structures/serialization.py +++ b/src/common/structures/serialization.py @@ -130,12 +130,6 @@ def to_simple(self) -> KeyValueSimpleInt: KeyValueMetaInt] -def key_value_meta_to_simple( - key_value_meta_list: list[KeyValueMetaAny] -) -> list[KeyValueSimpleAny]: - return [key_value_meta.to_simple() for key_value_meta in key_value_meta_list] - - DeserializableT = TypeVar('DeserializableT', bound='MCTParsable') diff --git a/src/common/util/__init__.py b/src/common/util/__init__.py index 91954f4..d91b4e7 100644 --- a/src/common/util/__init__.py +++ b/src/common/util/__init__.py @@ -1,5 +1,3 @@ from .image_utils import ImageUtils from .io_utils import IOUtils from .math_utils import MathUtils -from .network_utils import NetworkUtils -from .python_utils import PythonUtils diff --git a/src/common/util/io_utils.py b/src/common/util/io_utils.py index 6f7a1e9..9ab41d1 100644 --- a/src/common/util/io_utils.py +++ b/src/common/util/io_utils.py @@ -130,7 +130,7 @@ def json_write( create_path=True ): return False - if ignore_none is True: + if ignore_none: json_dict = IOUtils._remove_all_none_from_dict_recursive(json_dict) try: with open(filepath, 'w', encoding='utf-8') as output_file: @@ -143,10 +143,10 @@ def json_write( @staticmethod def _remove_all_none_from_dict_recursive( - input_dict + input_dict: dict ) -> dict: output_dict = dict(input_dict) - for key in output_dict: + for key in output_dict.keys(): if isinstance(output_dict[key], dict): output_dict[key] = IOUtils._remove_all_none_from_dict_recursive(output_dict[key]) elif output_dict[key] is None: diff --git a/src/common/util/network_utils.py b/src/common/util/network_utils.py deleted file mode 100644 index 0a7aee3..0000000 --- a/src/common/util/network_utils.py +++ /dev/null @@ -1,17 +0,0 @@ -from fastapi import Request, WebSocket - - -class NetworkUtils: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. - """ - - def __init__(self): - raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") - - @staticmethod - def client_identifier_from_connection( - connection: Request | WebSocket - ) -> str: - return f"{connection.client.host}:{connection.client.port}" diff --git a/src/common/util/python_utils.py b/src/common/util/python_utils.py deleted file mode 100644 index 8c0e799..0000000 --- a/src/common/util/python_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import TypeVar - - -T = TypeVar("T") - - -class PythonUtils: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. 
- """ - - def __init__(self): - raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") - - @staticmethod - def get_kwarg( - kwargs: dict, - key: str, - arg_type: type[T], - required: bool = True - ) -> T | None: - """ - :param kwargs: kwargs as a dict (without the "**") - :param key: key to search for - :param arg_type: expected type - :param required: If the keyword does not exist, then: - required == True -> Raise ValueError - required == False -> Return None - """ - - if key not in kwargs: - if required: - raise ValueError(f"Missing required key {key} in keyword arguments.") - return None - value: T = kwargs[key] - if not isinstance(value, arg_type): - raise ValueError( - f"Expected keyword argument {key} to be of type {arg_type.__name__}, " - f"but got {type(value).__name__}.") - return value - diff --git a/src/detector/detector.py b/src/detector/detector.py index 4080514..de81b2e 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -44,8 +44,7 @@ MCTComponent, \ MCTAnnotatorRuntimeError, \ MCTRequest, \ - MCTResponse, \ - PythonUtils + MCTResponse from src.common.structures import \ DetectorFrame, \ ImageResolution, \ @@ -107,7 +106,7 @@ def __del__(self): self._camera.__del__() def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | ErrorResponse: - request: CalibrationCalculateRequest = PythonUtils.get_kwarg( + request: CalibrationCalculateRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationCalculateRequest) @@ -138,7 +137,7 @@ def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | Erro return CalibrationImageAddResponse(image_identifier=image_identifier) def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | ErrorResponse: - request: CalibrationImageGetRequest = PythonUtils.get_kwarg( + request: CalibrationImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageGetRequest) @@ -150,7 +149,7 @@ def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | Error return CalibrationImageGetResponse(image_base64=image_base64) def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataListResponse | ErrorResponse: - request: CalibrationImageMetadataListRequest = PythonUtils.get_kwarg( + request: CalibrationImageMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageMetadataListRequest) @@ -163,7 +162,7 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationImageMetadataUpdateRequest = PythonUtils.get_kwarg( + request: CalibrationImageMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationImageMetadataUpdateRequest) @@ -185,7 +184,7 @@ def calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListRes return CalibrationResolutionListResponse(resolutions=resolutions) def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | ErrorResponse: - request: CalibrationResultGetRequest = PythonUtils.get_kwarg( + request: CalibrationResultGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultGetRequest) @@ -206,7 +205,7 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive return 
CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadataListResponse | ErrorResponse: - request: CalibrationResultMetadataListRequest = PythonUtils.get_kwarg( + request: CalibrationResultMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultMetadataListRequest) @@ -219,7 +218,7 @@ def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadat return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationResultMetadataUpdateRequest = PythonUtils.get_kwarg( + request: CalibrationResultMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CalibrationResultMetadataUpdateRequest) @@ -233,7 +232,7 @@ def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorR return EmptyResponse() def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: - request: CameraImageGetRequest = PythonUtils.get_kwarg( + request: CameraImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraImageGetRequest) @@ -257,7 +256,7 @@ def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | Erro return CameraParametersGetResponse(parameters=parameters) def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | ErrorResponse: - request: CameraParametersSetRequest = PythonUtils.get_kwarg( + request: CameraParametersSetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=CameraParametersSetRequest) @@ -278,7 +277,7 @@ def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | Erro return CameraResolutionGetResponse(resolution=image_resolution) def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorResponse: - request: DetectorFrameGetRequest = PythonUtils.get_kwarg( + request: DetectorFrameGetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=DetectorFrameGetRequest) @@ -322,7 +321,7 @@ def marker_parameters_get(self, **_kwargs) -> AnnotatorParametersGetResponse | E return AnnotatorParametersGetResponse(parameters=parameters) def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: AnnotatorParametersSetRequest = PythonUtils.get_kwarg( + request: AnnotatorParametersSetRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=AnnotatorParametersSetRequest) diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index b404836..e283da3 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -79,18 +79,12 @@ def create_app() -> FastAPI: allow_headers=["*"]) @detector_app.head("/detector/start") - async def detector_start( - http_request: Request - ) -> None: - client_identifier: str = NetworkUtils.client_identifier_from_connection(connection=http_request) - detector.detector_start(client_identifier=client_identifier) + async def detector_start() -> None: + detector.detector_start() @detector_app.head("/detector/stop") - async def detector_stop( - http_request: Request - ) -> None: - client_identifier: str = NetworkUtils.client_identifier_from_connection(connection=http_request) - detector.detector_stop(client_identifier=client_identifier) + async def detector_stop() -> None: + detector.detector_stop() @detector_app.post("/detector/start_time_sync") async def start_time_sync( diff --git 
a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py index ec0f709..0f3ae3e 100644 --- a/src/pose_solver/pose_solver_api.py +++ b/src/pose_solver/pose_solver_api.py @@ -18,8 +18,7 @@ ErrorResponse, \ MCTComponent, \ MCTRequest, \ - MCTResponse, \ - PythonUtils + MCTResponse from src.common.structures import \ Pose from enum import StrEnum @@ -67,7 +66,7 @@ def __init__( self._status = PoseSolverAPI.Status() def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddDetectorFrameRequest = PythonUtils.get_kwarg( + request: PoseSolverAddDetectorFrameRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverAddDetectorFrameRequest) @@ -80,7 +79,7 @@ def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddTargetMarkerRequest = PythonUtils.get_kwarg( + request: PoseSolverAddTargetMarkerRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverAddTargetMarkerRequest) @@ -106,7 +105,7 @@ def get_role_label(): return _ROLE_LABEL def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetExtrinsicRequest = PythonUtils.get_kwarg( + request: PoseSolverSetExtrinsicRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetExtrinsicRequest) @@ -119,7 +118,7 @@ def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetIntrinsicRequest = PythonUtils.get_kwarg( + request: PoseSolverSetIntrinsicRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetIntrinsicRequest) @@ -132,7 +131,7 @@ def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetReferenceRequest = PythonUtils.get_kwarg( + request: PoseSolverSetReferenceRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetReferenceRequest) @@ -143,7 +142,7 @@ def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetTargetsRequest = PythonUtils.get_kwarg( + request: PoseSolverSetTargetsRequest = self.get_kwarg( kwargs=kwargs, key="request", arg_type=PoseSolverSetTargetsRequest) From 94d59b4560d8e67f870f758d75b9c1c1e3cfff5d Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 15 Jul 2025 14:05:12 -0400 Subject: [PATCH 09/33] WIP: Simplified target definition and pose solver logic --- doc/glossary.md | 11 + src/board_builder/board_builder.py | 18 +- src/board_builder/structures.py | 232 ++++++++++++++++++ src/board_builder/structures/__init__.py | 7 - .../structures/marker_corners.py | 21 -- .../structures/marker_ray_set.py | 36 --- src/board_builder/structures/matrix_node.py | 9 - src/board_builder/structures/pose_data.py | 24 -- src/board_builder/structures/pose_location.py | 73 ------ src/board_builder/structures/target_base.py | 5 - src/board_builder/structures/target_marker.py | 7 - .../test/accuracy/accuracy_test.py | 12 +- .../test/accuracy/utils/generate_data.py | 2 +- .../test/accuracy/utils/graph_renderer.py | 2 +- .../utils/board_builder_pose_solver.py | 13 +- src/common/annotator.py | 7 +- src/common/camera.py | 8 +- 
src/common/mct_component.py | 10 +- src/common/structures/__init__.py | 9 +- src/common/structures/image.py | 25 +- src/common/structures/linear_algebra.py | 30 ++- src/common/structures/tracking.py | 91 +------ src/common/util/math_utils.py | 67 +++-- src/controller/configuration.py | 10 +- src/controller/connection.py | 92 +++---- src/controller/mct_controller.py | 71 +++--- src/detector/detector_app.py | 3 +- src/gui/gui.py | 4 +- src/gui/panels/base_panel.py | 13 +- src/gui/panels/board_builder_panel.py | 4 +- src/gui/panels/detector_panel.py | 4 +- src/gui/panels/pose_solver_panel.py | 5 +- src/implementations/annotator_aruco_opencv.py | 22 +- src/implementations/common_aruco_opencv.py | 21 +- src/pose_solver/__init__.py | 3 +- src/pose_solver/api.py | 30 +-- src/pose_solver/exceptions.py | 6 +- src/pose_solver/pose_solver.py | 210 ++++++++-------- src/pose_solver/pose_solver_api.py | 14 +- src/pose_solver/pose_solver_app.py | 4 +- src/pose_solver/structures.py | 133 ++++------ ...generate_target_definition_from_charuco.py | 21 +- test/test_pose_solver.py | 97 ++++---- 43 files changed, 732 insertions(+), 754 deletions(-) create mode 100644 doc/glossary.md create mode 100644 src/board_builder/structures.py delete mode 100644 src/board_builder/structures/__init__.py delete mode 100644 src/board_builder/structures/marker_corners.py delete mode 100644 src/board_builder/structures/marker_ray_set.py delete mode 100644 src/board_builder/structures/matrix_node.py delete mode 100644 src/board_builder/structures/pose_data.py delete mode 100644 src/board_builder/structures/pose_location.py delete mode 100644 src/board_builder/structures/target_base.py delete mode 100644 src/board_builder/structures/target_marker.py diff --git a/doc/glossary.md b/doc/glossary.md new file mode 100644 index 0000000..00eddb1 --- /dev/null +++ b/doc/glossary.md @@ -0,0 +1,11 @@ +# Glossary +- Annotation: A feature that is detected in an image. +- Controller: Component responsible for coordinating communication between other components. +- Detector: Component responsible for capturing images, processing them, and generating Annotations for tracking. +- Feature: Something that is identifiable either in 2D or 3D. +- Landmark: A unique feature on a Target that has its own distinct 3D coordinates. +- Pose: a position and orientation. +- Pose Solver: Component responsible for receiving Annotations and calculating Poses per Target. +- Target: A definition of something to track. Currently these consist of Landmarks. + +Notes: Annotation, Feature, and Landmark are all distinct but tightly related concepts. diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 4c21b1a..5258593 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -9,7 +9,7 @@ from .utils import BoardBuilderPoseSolver from .structures import PoseLocation, MarkerCorners from src.common.structures import Pose, Annotation, Matrix4x4 -from src.common.structures import Marker, TargetBoard +from .structures import Marker, TargetBoard _HOMOGENEOUS_POINT_COORD: Final[int] = 4 TESTED_BOARD_NAME: str = 'top_data.json' # If collecting data for repeatability test, specify the file name. 
cube_data.json, planar_data.json, top_data.json @@ -115,13 +115,13 @@ def _filter_markers_appearing_in_multiple_detectors(data): label_counts = defaultdict(int) for snapshots in data.values(): for snapshot in snapshots: - label_counts[snapshot.label] += 1 + label_counts[snapshot.feature_label] += 1 filtered_data = {} for key, snapshots in data.items(): filtered_snapshots = [ snapshot for snapshot in snapshots - if label_counts[snapshot.label] >= 2] + if label_counts[snapshot.feature_label] >= 2] if filtered_snapshots: filtered_data[key] = filtered_snapshots return filtered_data @@ -150,10 +150,10 @@ def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: dat timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: for marker_snapshot in detector_data[detector_name]: - if marker_snapshot.label not in list(self._index_to_marker_id.values()): - self.pose_solver.add_target_marker(int(marker_snapshot.label)) + if marker_snapshot.feature_label not in list(self._index_to_marker_id.values()): + self.pose_solver.add_target_marker(int(marker_snapshot.feature_label)) self._expand_relative_pose_matrix() - self._index_to_marker_id[self._matrix_id_index] = marker_snapshot.label + self._index_to_marker_id[self._matrix_id_index] = marker_snapshot.feature_label self._matrix_id_index += 1 for detector_name in detector_data: @@ -166,7 +166,7 @@ def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: dat [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] marker_corners = MarkerCorners( detector_label=detector_name, - marker_id=int(detector_data[detector_name][i].label), + marker_id=int(detector_data[detector_name][i].feature_label), points=corners_list, timestamp=timestamp) self.pose_solver.add_marker_corners([marker_corners]) @@ -210,7 +210,7 @@ def _write_detector_data_to_recording_file(detector_data: dict[str, list[Annotat formatted_data[detector_name] = [] for snapshot in snapshots: snapshot_data = { - "label": snapshot.label, + "label": snapshot.feature_label, "corner_image_points": [snapshot.x_px, snapshot.y_px], "timestamp": timestamp} formatted_data[detector_name].append(snapshot_data) @@ -258,7 +258,7 @@ def locate_reference_board(self, detector_data: dict[str, list[Annotation]]): [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] marker_corners = MarkerCorners( detector_label=detector_name, - marker_id=int(detector_data[detector_name][i].label), + marker_id=int(detector_data[detector_name][i].feature_label), points=corners_list, timestamp=timestamp) self.pose_solver.add_marker_corners([marker_corners]) diff --git a/src/board_builder/structures.py b/src/board_builder/structures.py new file mode 100644 index 0000000..8385a74 --- /dev/null +++ b/src/board_builder/structures.py @@ -0,0 +1,232 @@ +from src.common.util import MathUtils +from src.common.structures import Matrix4x4, Pose +import abc +import datetime +import numpy as np +from pydantic import BaseModel, Field, PrivateAttr +from scipy.spatial.transform import Rotation as R + + +# TODO: Merge into a similar structure in common +class MarkerCorners: + detector_label: str + marker_id: int + points: list[list[float]] + timestamp: datetime.datetime + + def __init__( + self, + detector_label: str, + marker_id: int, + points: list[list[float]], + timestamp: datetime.datetime + ): + self.detector_label = detector_label + self.marker_id = marker_id + self.points = points + self.timestamp = timestamp + + +class 
MarkerRaySet(BaseModel): + marker_id: int = Field() + image_points: list[list[float]] = Field() # image positions of marker corners. Size 4. + image_timestamp: datetime.datetime = Field() + ray_origin_reference: list[float] = Field() # Shared origin for all rays (same detector) + ray_directions_reference: list[list[float]] = Field() # Size 4 (one for each image point) + detector_label: str = Field() + detector_to_reference_matrix: Matrix4x4 = Field() + + @staticmethod + def age_seconds( + marker_ray_set, + query_timestamp: datetime.datetime + ): + return (query_timestamp - marker_ray_set.image_timestamp).total_seconds() + + @staticmethod + def newest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: + return_value = datetime.datetime.now() + for ray_set in marker_ray_set_list: + if ray_set.image_timestamp > return_value: + return_value = ray_set.image_timestamp + return return_value + + @staticmethod + def oldest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: + return_value = datetime.datetime.utcfromtimestamp(0) + for ray_set in marker_ray_set_list: + if ray_set.image_timestamp > return_value: + return_value = ray_set.image_timestamp + return return_value + + +class MatrixNode: + def __init__(self, node_id: str): + self.id = node_id + self.neighbours = [] + self.weights = {} + + def add_neighbour(self, neighbour_node, weight: int): + self.neighbours.append(neighbour_node) + self.weights[neighbour_node.id] = weight + + +# TODO: Merge/replace this with pose under common data structures +class PoseData(BaseModel): + target_id: str = Field() + object_to_reference_matrix: Matrix4x4 = Field() + ray_sets: list[MarkerRaySet] + + def newest_timestamp(self) -> datetime.datetime: + return MarkerRaySet.newest_timestamp_in_list(self.ray_sets) + + def oldest_timestamp(self) -> datetime.datetime: + return MarkerRaySet.oldest_timestamp_in_list(self.ray_sets) + + @staticmethod + def age_seconds( + pose, + query_timestamp: datetime.datetime + ) -> float: + return (query_timestamp - pose.oldest_timestamp()).total_seconds() + + +class PoseLocation: + + _id: str + _timestamp: str + _TMatrix: np.ndarray + _RMAT_list: list + _TVEC_list: list + + def __init__(self, object_id): + self._id = object_id + self._timestamp = str(datetime.datetime.now()) + + self._TMatrix = np.eye(4) + self._RMAT_list = [] # Rotation matrix + self._TVEC_list = [] # Translation vector + + self.frame_count = 0 + + def add_matrix(self, transformation_matrix: Matrix4x4, timestamp: str): + self._timestamp = timestamp + + self._RMAT_list.append(transformation_matrix[:3, :3]) + self._TVEC_list.append(transformation_matrix[:3, 3]) + + avg_translation = np.mean(self._TVEC_list, axis=0) + + quaternions = [R.from_matrix(rot).as_quat(canonical=True) for rot in self._RMAT_list] + quaternions = [[float(quaternion[i]) for i in range(0, 4)] for quaternion in quaternions] + avg_quat = MathUtils.average_quaternion(quaternions) + avg_rotation = R.from_quat(avg_quat).as_matrix() + + self._TMatrix[:3, :3] = avg_rotation + self._TMatrix[:3, 3] = avg_translation + + def get_matrix(self): + return self._TMatrix + + def get_average_pose(self): + pose = Pose( + target_id=self._id, + object_to_reference_matrix=Matrix4x4.from_numpy_array(self._TMatrix), + solver_timestamp_utc_iso8601=self._timestamp + ) + return pose + + def get_median_pose(self): + if not self._RMAT_list or not self._TVEC_list: + raise ValueError("No matrices available to compute the median.") + + rmat_array = np.array(self._RMAT_list) + tvec_array = 
np.array(self._TVEC_list) + + median_rmat = np.median(rmat_array, axis=0) + median_tvec = np.median(tvec_array, axis=0) + + median_transformation_matrix = np.eye(4) + median_transformation_matrix[:3, :3] = median_rmat + median_transformation_matrix[:3, 3] = median_tvec + + pose = Pose( + target_id=self._id, + object_to_reference_matrix=Matrix4x4.from_numpy_array(median_transformation_matrix), + solver_timestamp_utc_iso8601=self._timestamp + ) + + return pose + + +class Marker(BaseModel): + marker_id: str = Field() + marker_size: float | None = Field(default=None) + points: list[list[float]] | None = Field(default=None) + + def get_points_internal(self) -> list[list[float]]: + # Use the TargetBase.get_points() instead. + if self.points is None: + if self.marker_size is None: + raise RuntimeError("TargetMarker defined with neither marker_size nor points.") + half_width = self.marker_size / 2.0 + self.points = [ + [-half_width, half_width, 0.0], + [half_width, half_width, 0.0], + [half_width, -half_width, 0.0], + [-half_width, -half_width, 0.0]] + return self.points + + +class TargetBase(BaseModel, abc.ABC): + label: str = Field() + + @abc.abstractmethod + def get_marker_ids(self) -> list[str]: ... + + @abc.abstractmethod + def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: ... + + @abc.abstractmethod + def get_points(self) -> list[list[float]]: ... + + +class TargetMarker(TargetBase, Marker): + def get_marker_ids(self) -> list[str]: + return [self.marker_id] + + def get_points(self) -> list[list[float]]: + return self.get_points_internal() + + def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: + if marker_id != self.marker_id: + raise IndexError(f"marker_id {marker_id} is not in target {self.label}") + return self.get_points_internal() + + +class TargetBoard(TargetBase): + markers: list[Marker] = Field() + _marker_dict: None | dict[str, Marker] = PrivateAttr() + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._marker_dict = None + + def get_marker_ids(self) -> list[str]: + return [marker.marker_id for marker in self.markers] + + def get_points(self) -> list[list[float]]: + points = list() + for marker in self.markers: + points += marker.get_points_internal() + return points + + def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: + if self._marker_dict is None: + self._marker_dict = dict() + for marker in self.markers: + self._marker_dict[marker.marker_id] = marker + if marker_id not in self._marker_dict: + raise IndexError(f"marker_id {marker_id} is not in target {self.label}") + return self._marker_dict[marker_id].points + diff --git a/src/board_builder/structures/__init__.py b/src/board_builder/structures/__init__.py deleted file mode 100644 index 563af0d..0000000 --- a/src/board_builder/structures/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .marker_corners import MarkerCorners -from .marker_ray_set import MarkerRaySet -from .pose_data import PoseData -from .pose_location import PoseLocation -from .target_base import Target -from .target_marker import TargetMarker -from .matrix_node import MatrixNode diff --git a/src/board_builder/structures/marker_corners.py b/src/board_builder/structures/marker_corners.py deleted file mode 100644 index 8ef2867..0000000 --- a/src/board_builder/structures/marker_corners.py +++ /dev/null @@ -1,21 +0,0 @@ -import datetime - - -# TODO: Merge into a similar structure in common -class MarkerCorners: - detector_label: str - marker_id: int - points: list[list[float]] - 
timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp diff --git a/src/board_builder/structures/marker_ray_set.py b/src/board_builder/structures/marker_ray_set.py deleted file mode 100644 index 81d651b..0000000 --- a/src/board_builder/structures/marker_ray_set.py +++ /dev/null @@ -1,36 +0,0 @@ -from src.common.structures import Matrix4x4 -import datetime -from pydantic import BaseModel, Field - - -class MarkerRaySet(BaseModel): - marker_id: int = Field() - image_points: list[list[float]] = Field() # image positions of marker corners. Size 4. - image_timestamp: datetime.datetime = Field() - ray_origin_reference: list[float] = Field() # Shared origin for all rays (same detector) - ray_directions_reference: list[list[float]] = Field() # Size 4 (one for each image point) - detector_label: str = Field() - detector_to_reference_matrix: Matrix4x4 = Field() - - @staticmethod - def age_seconds( - marker_ray_set, - query_timestamp: datetime.datetime - ): - return (query_timestamp - marker_ray_set.image_timestamp).total_seconds() - - @staticmethod - def newest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.now() - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value - - @staticmethod - def oldest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.utcfromtimestamp(0) - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value diff --git a/src/board_builder/structures/matrix_node.py b/src/board_builder/structures/matrix_node.py deleted file mode 100644 index 2c83f16..0000000 --- a/src/board_builder/structures/matrix_node.py +++ /dev/null @@ -1,9 +0,0 @@ -class MatrixNode: - def __init__(self, node_id: str): - self.id = node_id - self.neighbours = [] - self.weights = {} - - def add_neighbour(self, neighbour_node, weight: int): - self.neighbours.append(neighbour_node) - self.weights[neighbour_node.id] = weight diff --git a/src/board_builder/structures/pose_data.py b/src/board_builder/structures/pose_data.py deleted file mode 100644 index 13205ba..0000000 --- a/src/board_builder/structures/pose_data.py +++ /dev/null @@ -1,24 +0,0 @@ -from .marker_ray_set import MarkerRaySet -from src.common.structures import Matrix4x4 -import datetime -from pydantic import BaseModel, Field - - -# TODO: Merge/replace this with pose under common data structures -class PoseData(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - ray_sets: list[MarkerRaySet] - - def newest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.newest_timestamp_in_list(self.ray_sets) - - def oldest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.oldest_timestamp_in_list(self.ray_sets) - - @staticmethod - def age_seconds( - pose, - query_timestamp: datetime.datetime - ) -> float: - return (query_timestamp - pose.oldest_timestamp()).total_seconds() diff --git a/src/board_builder/structures/pose_location.py b/src/board_builder/structures/pose_location.py deleted file mode 100644 index fb96037..0000000 --- a/src/board_builder/structures/pose_location.py +++ /dev/null @@ -1,73 +0,0 @@ -from 
src.common.util import MathUtils -from src.common.structures import Matrix4x4, Pose -import datetime -import numpy as np -from scipy.spatial.transform import Rotation as R - - -class PoseLocation: - - _id: str - _timestamp: str - _TMatrix: np.ndarray - _RMAT_list: list - _TVEC_list: list - - def __init__(self, object_id): - self._id = object_id - self._timestamp = str(datetime.datetime.now()) - - self._TMatrix = np.eye(4) - self._RMAT_list = [] # Rotation matrix - self._TVEC_list = [] # Translation vector - - self.frame_count = 0 - - def add_matrix(self, transformation_matrix: Matrix4x4, timestamp: str): - self._timestamp = timestamp - - self._RMAT_list.append(transformation_matrix[:3, :3]) - self._TVEC_list.append(transformation_matrix[:3, 3]) - - avg_translation = np.mean(self._TVEC_list, axis=0) - - quaternions = [R.from_matrix(rot).as_quat(canonical=True) for rot in self._RMAT_list] - quaternions = [[float(quaternion[i]) for i in range(0, 4)] for quaternion in quaternions] - avg_quat = MathUtils.average_quaternion(quaternions) - avg_rotation = R.from_quat(avg_quat).as_matrix() - - self._TMatrix[:3, :3] = avg_rotation - self._TMatrix[:3, 3] = avg_translation - - def get_matrix(self): - return self._TMatrix - - def get_average_pose(self): - pose = Pose( - target_id=self._id, - object_to_reference_matrix=Matrix4x4.from_numpy_array(self._TMatrix), - solver_timestamp_utc_iso8601=self._timestamp - ) - return pose - - def get_median_pose(self): - if not self._RMAT_list or not self._TVEC_list: - raise ValueError("No matrices available to compute the median.") - - rmat_array = np.array(self._RMAT_list) - tvec_array = np.array(self._TVEC_list) - - median_rmat = np.median(rmat_array, axis=0) - median_tvec = np.median(tvec_array, axis=0) - - median_transformation_matrix = np.eye(4) - median_transformation_matrix[:3, :3] = median_rmat - median_transformation_matrix[:3, 3] = median_tvec - - pose = Pose( - target_id=self._id, - object_to_reference_matrix=Matrix4x4.from_numpy_array(median_transformation_matrix), - solver_timestamp_utc_iso8601=self._timestamp - ) - - return pose diff --git a/src/board_builder/structures/target_base.py b/src/board_builder/structures/target_base.py deleted file mode 100644 index 1e2e25c..0000000 --- a/src/board_builder/structures/target_base.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import BaseModel - - -class Target(BaseModel): - pass diff --git a/src/board_builder/structures/target_marker.py b/src/board_builder/structures/target_marker.py deleted file mode 100644 index ccb6184..0000000 --- a/src/board_builder/structures/target_marker.py +++ /dev/null @@ -1,7 +0,0 @@ -from .target_base import Target -from pydantic import Field - - -class TargetMarker(Target): - marker_id: int = Field() - marker_size: float = Field() diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index cb496c0..c1385e9 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -1,9 +1,9 @@ from src.board_builder.board_builder import BoardBuilder from src.common import MathUtils -from src.common.structures import \ - Annotation, \ - TargetBoard, \ - Marker +from src.common.structures import Annotation +from src.board_builder.structures import \ + Marker, \ + TargetBoard from .structures import AccuracyTestParameters from .utils import \ generate_virtual_snapshots, \ @@ -47,7 +47,7 @@ def _add_noise_to_corners(self, data): # Apply noise for i, corner in 
enumerate(marker_snapshot.corner_image_points): noisy_marker_snapshots.append(Annotation( - label=f"{marker_snapshot.label}_{i}", + feature_label=f"{marker_snapshot.feature_label}_{i}", x_px=corner.x_px + noise[i * 2], y_px=corner.y_px + noise[i * 2 + 1])) @@ -143,7 +143,7 @@ def _write_results_to_file(self, module_name: str, snapshots, two_dimension_coll { detector_name: [ { - "label": snapshot.label, + "label": snapshot.feature_label, "corner_image_points": [{"x_px": pt.x_px, "y_px": pt.y_px} for pt in snapshot.corner_image_points] } diff --git a/src/board_builder/test/accuracy/utils/generate_data.py b/src/board_builder/test/accuracy/utils/generate_data.py index 89e0cc8..5d87a68 100644 --- a/src/board_builder/test/accuracy/utils/generate_data.py +++ b/src/board_builder/test/accuracy/utils/generate_data.py @@ -100,7 +100,7 @@ def generate_data(board_coordinates, detector_poses, remove_markers_out_of_frame for corner_index, marker_corner in enumerate(marker_corners): corner_label: str = f"{str(marker)}_{corner_index}" marker_snapshot_list.append(Annotation( - label=corner_label, + feature_label=corner_label, x_px=marker_corner[0], y_px=marker_corner[1])) diff --git a/src/board_builder/test/accuracy/utils/graph_renderer.py b/src/board_builder/test/accuracy/utils/graph_renderer.py index 6c3347a..07d9b2c 100644 --- a/src/board_builder/test/accuracy/utils/graph_renderer.py +++ b/src/board_builder/test/accuracy/utils/graph_renderer.py @@ -80,7 +80,7 @@ def plot_quadrilateral(ax, corners, marker_id, color): ax = ax_green if detector == 'camera 1' else ax_blue for marker_snapshot in marker_snapshots: corners = np.array([(corner.x_px, corner.y_px) for corner in marker_snapshot.corner_image_points]) - plot_quadrilateral(ax, corners, f'M{marker_snapshot.label}S{i}', color) + plot_quadrilateral(ax, corners, f'M{marker_snapshot.feature_label}S{i}', color) # Set limits and aspect for ax in [ax_green, ax_blue]: diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index d3819ff..1238e7b 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -5,14 +5,13 @@ PoseLocation from src.common import MathUtils from src.common.structures import \ - CharucoBoardSpecification, \ IntrinsicParameters, \ IterativeClosestPointParameters, \ Matrix4x4, \ Pose, \ - Ray, \ - TargetBase, \ - TargetMarker + Ray +from src.board_builder.structures import TargetBase, TargetMarker +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon from src.pose_solver.structures import PoseSolverParameters import cv2 import cv2.aruco @@ -192,7 +191,7 @@ def __init__(self): self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) - self._charuco_board = CharucoBoardSpecification() + self._charuco_board = ArucoOpenCVCommon.CharucoBoard() self._board_marker_ids = self._charuco_board.get_marker_ids() self._board_marker_positions = self._charuco_board.get_marker_center_points() self._board_marker_size = 10 @@ -567,7 +566,7 @@ def _estimate_target_pose_relative_to_reference(self): intersections_appear_valid = False break else: - corner_points_in_reference.append(intersection_result.centroid()) + corner_points_in_reference.append(intersection_result.centroid().tolist()) if not intersections_appear_valid: rejected_intersection_marker_ids.append(marker_id) continue @@ -725,7 +724,7 @@ def 
_estimate_target_pose_relative_to_reference(self): detector_position_reference = detector_to_reference_matrix.as_numpy_array()[0:3, 3] target_position_reference = object_to_reference_matrix[0:3, 3] depth_vector_reference = target_position_reference - detector_position_reference - old_depth = numpy.linalg.norm(depth_vector_reference) + old_depth = float(numpy.linalg.norm(depth_vector_reference)) target_depth_key = TargetDepthKey(target_id=target_id, detector_label=detector_label) new_depth = float(numpy.average( [target_depth.depth for target_depth in diff --git a/src/common/annotator.py b/src/common/annotator.py index d770aba..88f9447 100644 --- a/src/common/annotator.py +++ b/src/common/annotator.py @@ -12,7 +12,6 @@ from enum import StrEnum import numpy from pydantic import BaseModel, Field -from typing import Final class _Configuration(BaseModel): @@ -20,9 +19,9 @@ class _Configuration(BaseModel): class _Status(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" + STOPPED = "STOPPED" + RUNNING = "RUNNING" + FAILURE = "FAILURE" class MCTAnnotatorRuntimeError(MCTError): diff --git a/src/common/camera.py b/src/common/camera.py index ee6ac9d..953ab70 100644 --- a/src/common/camera.py +++ b/src/common/camera.py @@ -15,7 +15,7 @@ from enum import StrEnum import numpy from pydantic import BaseModel, Field -from typing import Final, Union +from typing import Union class _Configuration(BaseModel): @@ -24,9 +24,9 @@ class _Configuration(BaseModel): class _Status(StrEnum): - STOPPED: Final[int] = "STOPPED" - RUNNING: Final[int] = "RUNNING" - FAILURE: Final[int] = "FAILURE" + STOPPED = "STOPPED" + RUNNING = "RUNNING" + FAILURE = "FAILURE" class MCTCameraRuntimeError(MCTError): diff --git a/src/common/mct_component.py b/src/common/mct_component.py index 299e42d..af55770 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -77,7 +77,7 @@ def parse_dynamic_series_list( supported_types=supported_types) except MCTSerializationError as e: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=e.message) raise e @@ -206,12 +206,16 @@ def websocket_handle_requests( else: message: str = f"Received unimplemented parsable_type: {request.parsable_type}." logger.error(message) - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) response_series.append(ErrorResponse(message=message)) except Exception as e: message: str = f"Internal error. Failed to process request series." 
logger.error(message + " " + str(e)) - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) response_series.append(ErrorResponse(message=message)) return MCTResponseSeries( series=response_series) diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py index 650c134..ef9b30c 100644 --- a/src/common/structures/__init__.py +++ b/src/common/structures/__init__.py @@ -3,8 +3,7 @@ ImageFormat, \ ImageResolution, \ IntrinsicCalibration, \ - IntrinsicParameters, \ - RELATION_CHARACTER + IntrinsicParameters from .linear_algebra import \ IterativeClosestPointParameters, \ Landmark, \ @@ -30,8 +29,4 @@ from .tracking import \ Annotation, \ DetectorFrame, \ - Marker, \ - PoseSolverFrame, \ - TargetBase, \ - TargetBoard, \ - TargetMarker + PoseSolverFrame diff --git a/src/common/structures/image.py b/src/common/structures/image.py index 2b9091b..b4b33e5 100644 --- a/src/common/structures/image.py +++ b/src/common/structures/image.py @@ -1,10 +1,7 @@ from enum import StrEnum import math from pydantic import BaseModel, Field -from typing import ClassVar, Final - - -RELATION_CHARACTER: Final[str] = "$" +from typing import ClassVar class Annotation(BaseModel): @@ -12,24 +9,28 @@ class Annotation(BaseModel): A distinct point as detected on a detector image. """ + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + UNIDENTIFIED_LABEL: ClassVar[str] = str() - label: str = Field() # Empty indicates that something was detected but not identified + feature_label: str = Field() # Empty indicates that something was detected but not identified x_px: float = Field() y_px: float = Field() - def base_label(self): + def base_feature_label(self) -> str: """ - Part of the label before the RELATED_PREFIX. + Part of the label before the RELATION_CHARACTER. """ - if RELATION_CHARACTER not in self.label: - return self.label - return self.label[0:self.label.index(RELATION_CHARACTER)] + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] class ImageFormat(StrEnum): - FORMAT_PNG: Final[str] = ".png" - FORMAT_JPG: Final[str] = ".jpg" + FORMAT_PNG = ".png" + FORMAT_JPG = ".jpg" class ImageResolution(BaseModel): diff --git a/src/common/structures/linear_algebra.py b/src/common/structures/linear_algebra.py index bd48e5b..316cafc 100644 --- a/src/common/structures/linear_algebra.py +++ b/src/common/structures/linear_algebra.py @@ -1,6 +1,6 @@ import numpy from pydantic import BaseModel, Field -from typing import Final +from typing import ClassVar, Final _DEFAULT_EPSILON: Final[float] = 0.0001 @@ -22,15 +22,31 @@ class IterativeClosestPointParameters(BaseModel): class Landmark(BaseModel): + + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + """ A distinct point in 3D space. Coordinates are in the unit of the user's choosing. """ - label: str = Field() + feature_label: str = Field() x: float = Field() y: float = Field() z: float = Field() + def as_float_list(self) -> list[float]: + return [self.x, self.y, self.z] + + def base_feature_label(self) -> str: + """ + Part of the label before the RELATION_CHARACTER. 
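+        For example, the (hypothetical, purely illustrative) feature labels "corner$1" and
+        "corner$2" would both have the base label "corner".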
+ """ + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] + class Matrix4x4(BaseModel): @@ -134,4 +150,14 @@ class Target(BaseModel): """ A trackable object. """ + label: str landmarks: list[Landmark] + + def get_landmark_point( + self, + feature_label: str + ) -> list[float]: + for landmark in self.landmarks: + if landmark.feature_label == feature_label: + return landmark.as_float_list() + raise ValueError diff --git a/src/common/structures/tracking.py b/src/common/structures/tracking.py index f7c33c7..eab389f 100644 --- a/src/common/structures/tracking.py +++ b/src/common/structures/tracking.py @@ -1,9 +1,7 @@ from .image import Annotation, ImageResolution from .linear_algebra import Pose -import abc import datetime -import numpy -from pydantic import BaseModel, Field, PrivateAttr +from pydantic import BaseModel, Field class DetectorFrame(BaseModel): @@ -13,11 +11,17 @@ class DetectorFrame(BaseModel): @property def annotations_identified(self): - return [annotation for annotation in self.annotations if annotation.label != Annotation.UNIDENTIFIED_LABEL] + return [ + annotation + for annotation in self.annotations + if annotation.feature_label != Annotation.UNIDENTIFIED_LABEL] @property def annotations_unidentified(self): - return [annotation for annotation in self.annotations if annotation.label == Annotation.UNIDENTIFIED_LABEL] + return [ + annotation + for annotation in self.annotations + if annotation.feature_label == Annotation.UNIDENTIFIED_LABEL] @property def timestamp_utc(self): @@ -31,80 +35,3 @@ class PoseSolverFrame(BaseModel): def timestamp_utc(self): return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) - - -# -------------------------------------------------------------------------------- -# Everything below should be assessed for either migration or deletion -# -------------------------------------------------------------------------------- - - -class Marker(BaseModel): - marker_id: str = Field() - marker_size: float | None = Field(default=None) - points: list[list[float]] | None = Field(default=None) - - def get_points_internal(self) -> list[list[float]]: - # Use the TargetBase.get_points() instead. - if self.points is None: - if self.marker_size is None: - raise RuntimeError("TargetMarker defined with neither marker_size nor points.") - half_width = self.marker_size / 2.0 - self.points = [ - [-half_width, half_width, 0.0], - [half_width, half_width, 0.0], - [half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]] - return self.points - - -class TargetBase(BaseModel, abc.ABC): - label: str = Field() - - @abc.abstractmethod - def get_marker_ids(self) -> list[str]: ... - - @abc.abstractmethod - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: ... - - @abc.abstractmethod - def get_points(self) -> list[list[float]]: ... 
- - -class TargetMarker(TargetBase, Marker): - def get_marker_ids(self) -> list[str]: - return [self.marker_id] - - def get_points(self) -> list[list[float]]: - return self.get_points_internal() - - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: - if marker_id != self.marker_id: - raise IndexError(f"marker_id {marker_id} is not in target {self.label}") - return self.get_points_internal() - - -class TargetBoard(TargetBase): - markers: list[Marker] = Field() - _marker_dict: None | dict[str, Marker] = PrivateAttr() - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self._marker_dict = None - - def get_marker_ids(self) -> list[str]: - return [marker.marker_id for marker in self.markers] - - def get_points(self) -> list[list[float]]: - points = list() - for marker in self.markers: - points += marker.get_points_internal() - return points - - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: - if self._marker_dict is None: - self._marker_dict = dict() - for marker in self.markers: - self._marker_dict[marker.marker_id] = marker - if marker_id not in self._marker_dict: - raise IndexError(f"marker_id {marker_id} is not in target {self.label}") - return self._marker_dict[marker_id].points diff --git a/src/common/util/math_utils.py b/src/common/util/math_utils.py index ec42c56..27b4c6f 100644 --- a/src/common/util/math_utils.py +++ b/src/common/util/math_utils.py @@ -1,9 +1,10 @@ from ..structures import \ + Annotation, \ IterativeClosestPointParameters, \ IntrinsicParameters, \ Matrix4x4, \ Ray, \ - TargetBase + Target import cv2 import numpy from scipy.spatial.transform import Rotation @@ -76,7 +77,7 @@ def centroid(self) -> numpy.ndarray: return (self.closest_point_1 + self.closest_point_2) / 2 def distance(self) -> float: - return numpy.linalg.norm(self.closest_point_2 - self.closest_point_1) + return float(numpy.linalg.norm(self.closest_point_2 - self.closest_point_1)) @staticmethod def closest_intersection_between_two_lines( @@ -84,8 +85,8 @@ def closest_intersection_between_two_lines( ray_2: Ray, epsilon: float = _DEFAULT_EPSILON ) -> RayIntersection2Output: # Returns data on intersection - ray_1_direction_normalized = ray_1.direction / numpy.linalg.norm(ray_1.direction) - ray_2_direction_normalized = ray_2.direction / numpy.linalg.norm(ray_2.direction) + ray_1_direction_normalized = numpy.asarray(ray_1.direction) / numpy.linalg.norm(ray_1.direction) + ray_2_direction_normalized = numpy.asarray(ray_2.direction) / numpy.linalg.norm(ray_2.direction) # ray 3 will be perpendicular to both rays 1 and 2, # and will intersect with both rays at the nearest point(s) @@ -95,8 +96,8 @@ def closest_intersection_between_two_lines( if ray_3_direction_norm < epsilon: return MathUtils.RayIntersection2Output( parallel=True, - closest_point_1=ray_1.source_point, - closest_point_2=ray_2.source_point) + closest_point_1=numpy.asarray(ray_1.source_point), + closest_point_2=numpy.asarray(ray_2.source_point)) # system of equations Ax = b b = numpy.subtract(ray_2.source_point, ray_1.source_point) @@ -225,28 +226,6 @@ def convert_detector_points_to_vectors( rays.append(list(ray_direction_reference[0:3])) return rays - @staticmethod - def convert_detector_corners_to_vectors( - corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y] - detector_intrinsics: IntrinsicParameters, - detector_to_reference_matrix: Matrix4x4 - ) -> dict[str, list[list[float]]]: # [marker_id][point_index][x/y/z] - """ - Given a detector's matrix transform 
and its intrinsic properties, - convert pixel coordinates to ray directions (with origin at the detector). - """ - ray_vectors_by_marker_id: dict[str, list[list[float]]] = dict() - corners: list[list[float]] - marker_id: str - for marker_id in corners_by_marker_id.keys(): - corners = corners_by_marker_id[marker_id] - rays: list[list[float]] = MathUtils.convert_detector_points_to_vectors( - points=corners, - detector_intrinsics=detector_intrinsics, - detector_to_reference_matrix=detector_to_reference_matrix) - ray_vectors_by_marker_id[marker_id] = rays - return ray_vectors_by_marker_id - @staticmethod def convex_quadrilateral_area( points: list[list[float]], # 2D points in clockwise order @@ -299,16 +278,18 @@ def convex_quadrilateral_area( @staticmethod def estimate_matrix_transform_to_detector( - target: TargetBase, - corners_by_marker_id: dict[str, list[list[float]]], # [marker_id][point_index][x/y] + annotations: list[Annotation], + target: Target, detector_intrinsics: IntrinsicParameters ) -> Matrix4x4: target_points: list[list[float]] = list() # ordered points [point_index][x/y/z] detector_points: list[list[float]] = list() # ordered points [point_index][x/y] - for marker_id in target.get_marker_ids(): - if marker_id in corners_by_marker_id: - target_points += target.get_points_for_marker_id(marker_id=marker_id) - detector_points += corners_by_marker_id[marker_id] + annotations_dict: dict[str, Annotation] = {annotation.feature_label: annotation for annotation in annotations} + for landmark in target.landmarks: + if landmark.feature_label in annotations_dict.keys(): + annotation = annotations_dict[landmark.feature_label] + target_points.append([landmark.x, landmark.y, landmark.z]) + detector_points.append([annotation.x_px, annotation.y_px]) rotation_vector: numpy.ndarray translation_vector: numpy.ndarray _, rotation_vector, translation_vector = cv2.solvePnP( @@ -524,13 +505,14 @@ def register_corresponding_points( for i in range(i, len(point_set)): p2: numpy.ndarray = numpy.asarray(point_set[1]) vec1 = p2 - p1 - vec1_length: float = numpy.linalg.norm(vec1) + vec1_length: float = float(numpy.linalg.norm(vec1)) if vec1_length > collinearity_zero_threshold: break # points are distinct, move to next phase for i in range(i, len(point_set)): p3: numpy.ndarray = numpy.asarray(point_set[2]) vec2: numpy.ndarray = p3 - p1 - cross_product_norm: float = numpy.linalg.norm(numpy.cross(vec1, vec2)) + # noinspection PyUnboundLocalVariable + cross_product_norm: float = float(numpy.linalg.norm(numpy.cross(vec1, vec2))) if cross_product_norm > collinearity_zero_threshold: collinear = False break @@ -561,3 +543,16 @@ def register_corresponding_points( matrix[0:3, 0:3] = rotation matrix[0:3, 3] = translation[0:3].reshape(3) return matrix + + @staticmethod + def square_marker_corner_points( + marker_size: float + ) -> list[list[float]]: #[corner_index][dimension_index], 3D + half_width = marker_size / 2.0 + corner_points = [ + [-half_width, half_width, 0., 1.], # Top-left + [half_width, half_width, 0., 1.], # Top-right + [half_width, -half_width, 0., 1.], # Bottom-right + [-half_width, -half_width, 0., 1.]] # Bottom-left + return corner_points + diff --git a/src/controller/configuration.py b/src/controller/configuration.py index 50ece83..b584467 100644 --- a/src/controller/configuration.py +++ b/src/controller/configuration.py @@ -1,16 +1,14 @@ from src.common.structures import \ KeyValueSimpleAny, \ Matrix4x4, \ - TargetBoard, \ - TargetMarker + Target from enum import StrEnum from pydantic import 
BaseModel, Field, SerializeAsAny -from typing import Final, Union class StartupMode(StrEnum): - DETECTING_ONLY: Final[str] = "detecting_only" - DETECTING_AND_SOLVING: Final[str] = "detecting_and_solving" + DETECTING_ONLY = "detecting_only" + DETECTING_AND_SOLVING = "detecting_and_solving" class MCTComponentConfig(BaseModel): @@ -27,7 +25,7 @@ class DetectorComponentConfig(MCTComponentConfig): class PoseSolverConfig(MCTComponentConfig): solver_parameters: list[SerializeAsAny[KeyValueSimpleAny]] | None = Field(default=None) - targets: list[Union[TargetBoard, TargetMarker]] | None = Field(default=None) + targets: list[Target] | None = Field(default=None) class MCTConfiguration(BaseModel): diff --git a/src/controller/connection.py b/src/controller/connection.py index b080411..77cb17f 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -18,7 +18,7 @@ MCTDeserializable, \ Pose, \ PoseSolverFrame, \ - TargetBase + Target from src.detector.api import \ CalibrationCalculateResponse, \ CalibrationImageAddResponse, \ @@ -67,17 +67,17 @@ class Connection(abc.ABC): class State(StrEnum): # This is the normal progression cycle ending back in "Inactive" - INACTIVE: Final[str] = "Inactive" - CONNECTING: Final[str] = "Connecting" - INITIALIZING: Final[str] = "Initializing" - RUNNING: Final[str] = "Running" - RECONNECTING: Final[str] = "Reconnecting" # Only if connection gets lost - NORMAL_DEINITIALIZING: Final[str] = "Deinitializing" # normal means not in a failure state - NORMAL_DISCONNECTING: Final[str] = "Disconnecting" + INACTIVE = "Inactive" + CONNECTING = "Connecting" + INITIALIZING = "Initializing" + RUNNING = "Running" + RECONNECTING = "Reconnecting" # Only if connection gets lost + NORMAL_DEINITIALIZING = "Deinitializing" # normal means not in a failure state + NORMAL_DISCONNECTING = "Disconnecting" # States below indicate abnormal/failed states - FAILURE: Final[str] = "Failure" - FAILURE_DISCONNECTING: Final[str] = "Failure - Disconnecting" - FAILURE_DEINITIALIZING: Final[str] = "Failure - Deinitializing" + FAILURE = "Failure" + FAILURE_DISCONNECTING = "Failure - Disconnecting" + FAILURE_DEINITIALIZING = "Failure - Deinitializing" class ComponentAddress: """ @@ -110,25 +110,25 @@ def __init__( self.error_message = error_message class DeinitializationResult(StrEnum): - IN_PROGRESS: Final[str] = "In Progress" - SUCCESS: Final[str] = "Success" - FAILURE: Final[str] = "Failure" + IN_PROGRESS = "In Progress" + SUCCESS = "Success" + FAILURE = "Failure" class InitializationResult(StrEnum): - IN_PROGRESS: Final[str] = "In Progress" - SUCCESS: Final[str] = "Success" - FAILURE: Final[str] = "Failure" + IN_PROGRESS = "In Progress" + SUCCESS = "Success" + FAILURE = "Failure" class SendRecvResult(StrEnum): - NORMAL: Final[str] = "Normal" - FAILURE: Final[str] = "Failure" + NORMAL = "Normal" + FAILURE = "Failure" class PopResponseSeriesResult: class Status(StrEnum): - QUEUED: Final[str] = "Exists" - IN_PROGRESS: Final[str] = "In Progress" - RESPONDED: Final[str] = "Responded" - UNTRACKED: Final[str] = "Untracked" # this suggests an error has occurred + QUEUED = "Exists" + IN_PROGRESS = "In Progress" + RESPONDED = "Responded" + UNTRACKED = "Untracked" # this suggests an error has occurred status: Status response_series: MCTResponseSeries | None @@ -347,7 +347,7 @@ def _response_series_converter( except ConnectionClosed as e: self._state = Connection.State.FAILURE self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection is closed for 
{self._component_address.label}. Cannot send. {str(e)}") return Connection.SendRecvResult.FAILURE @@ -364,7 +364,7 @@ def _response_series_converter( except ConnectionClosed as e: self._state = Connection.State.FAILURE self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection is closed for {self._component_address.label}. Cannot receive. {str(e)}") return Connection.SendRecvResult.FAILURE @@ -452,7 +452,7 @@ def _update_deinitialization_result(self) -> DeinitializationResult: request_series_id=self._deinit_request_id) if response_result.status == Connection.PopResponseSeriesResult.Status.UNTRACKED: self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"The current request ID is not recognized.") self._deinit_request_id = None return Connection.DeinitializationResult.FAILURE @@ -476,7 +476,7 @@ def _update_initialization_result(self) -> InitializationResult: request_series_id=self._init_request_id) if response_result.status == Connection.PopResponseSeriesResult.Status.UNTRACKED: self.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"The current request ID is not recognized.") self._init_request_id = None return Connection.InitializationResult.FAILURE @@ -494,20 +494,26 @@ def _update_in_connecting_state(self) -> None: connection_result: Connection.ConnectionResult = self._try_connect() if connection_result.success: message = f"Connection successful." - self.enqueue_status_message(severity="info", message=message) + self.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) self._state = Connection.State.INITIALIZING else: if self._attempt_count >= _ATTEMPT_COUNT_MAXIMUM: message = \ f"Failed to connect, received error: {str(connection_result.error_message)}. "\ f"Connection is being aborted after {self._attempt_count} attempts." - self.enqueue_status_message(severity="error", message=message) + self.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=message) self._state = Connection.State.FAILURE else: message: str = \ f"Failed to connect, received error: {str(connection_result.error_message)}. "\ f"Will retry in {_ATTEMPT_TIME_GAP_SECONDS} seconds." - self.enqueue_status_message(severity="warning", message=message) + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=message) self._next_attempt_timestamp_utc = now_utc + datetime.timedelta( seconds=_ATTEMPT_TIME_GAP_SECONDS) @@ -550,13 +556,17 @@ def _update_in_reconnecting_state(self) -> None: connection_result: Connection.ConnectionResult = self._try_connect() if connection_result.success: message = f"Reconnection successful." - self.enqueue_status_message(severity="info", message=message) + self.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) self._state = Connection.State.RUNNING else: message: str = \ f"Failed to reconnect, received error: {str(connection_result.error_message)}. "\ f"Will retry in {_ATTEMPT_TIME_GAP_SECONDS} seconds." 
- self.enqueue_status_message(severity="warning", message=message) + self.enqueue_status_message( + severity=SeverityLabel.WARNING, + message=message) self._next_attempt_timestamp_utc = now_utc + datetime.timedelta( seconds=_ATTEMPT_TIME_GAP_SECONDS) @@ -611,11 +621,11 @@ def handle_deinitialization_response_series( response_count: int = len(response_series.series) if response_count != 1: self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") elif not isinstance(response_series.series[0], (EmptyResponse, CameraParametersSetResponse)): self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"The deinitialization response was not of the expected type EmptyResponse.") return Connection.DeinitializationResult.SUCCESS @@ -626,11 +636,11 @@ def handle_initialization_response_series( response_count: int = len(response_series.series) if response_count != 1: self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Expected exactly one response to initialization requests. Got {response_count}.") elif not isinstance(response_series.series[0], EmptyResponse): self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"The initialization response was not of the expected type EmptyResponse.") return Connection.InitializationResult.SUCCESS @@ -657,7 +667,7 @@ class PoseSolverConnection(Connection): # These are variables used directly by the MCTController for storing data configured_solver_parameters: list[KeyValueSimpleAny] | None - configured_targets: list[TargetBase] | None + configured_targets: list[Target] | None request_id: uuid.UUID | None detector_poses: list[Pose] @@ -698,11 +708,11 @@ def handle_deinitialization_response_series( response_count: int = len(response_series.series) if response_count != 1: self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Expected exactly one response to deinitialization requests. Got {response_count}.") elif not isinstance(response_series.series[0], EmptyResponse): self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"The deinitialization response was not of the expected type EmptyResponse.") return Connection.DeinitializationResult.SUCCESS @@ -713,11 +723,11 @@ def handle_initialization_response_series( response_count: int = len(response_series.series) if response_count != 1: self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Expected exactly one response to initialization requests. 
Got {response_count}.") elif not isinstance(response_series.series[0], EmptyResponse): self.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"The initialization response was not of the expected type EmptyResponse.") return Connection.InitializationResult.SUCCESS diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 454b139..4967de7 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -15,6 +15,7 @@ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ + SeverityLabel, \ StatusMessageSource, \ TimestampGetRequest, \ TimestampGetResponse, \ @@ -65,18 +66,18 @@ class MCTController(MCTComponent): class Status(StrEnum): - STOPPED: Final[int] = "Idle" - STARTING: Final[int] = "Starting" - RUNNING: Final[int] = "Running" - STOPPING: Final[int] = "Stopping" + STOPPED = "Idle" + STARTING = "Starting" + RUNNING = "Running" + STOPPING = "Stopping" class StartupState(IntEnum): - INITIAL: Final[int] = 0 - CONNECTING: Final[int] = 1 - TIME_SYNC_START: Final[int] = 2 - TIME_SYNC_STOP: Final[int] = 3 - GET_INTRINSICS: Final[int] = 4 - SET_INTRINSICS: Final[int] = 5 + INITIAL = 0 + CONNECTING = 1 + TIME_SYNC_START = 2 + TIME_SYNC_STOP = 3 + GET_INTRINSICS = 4 + SET_INTRINSICS = 5 _status_message_source: StatusMessageSource _status: Status @@ -115,13 +116,13 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: IPv4Address(connection.ip_address) except ValueError: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection {connection.label} has invalid IP address {connection.ip_address}. " "It will be skipped.") return False if connection.port < 0 or connection.port > 65535: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Connection {connection.label} has invalid port {connection.port}. 
" "It will be skipped.") return False @@ -133,7 +134,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=detector.label, role="detector", - ip_address=detector.ip_address, + ip_address=IPv4Address(detector.ip_address), port=detector.port) detector_connection: DetectorConnection = self.add_connection(component_address=component_address) if detector.fixed_transform_to_reference is not None: @@ -148,7 +149,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=pose_solver.label, role="pose_solver", - ip_address=pose_solver.ip_address, + ip_address=IPv4Address(pose_solver.ip_address), port=pose_solver.port) pose_solver_connection: PoseSolverConnection = self.add_connection(component_address=component_address) if pose_solver.solver_parameters is not None: @@ -177,7 +178,7 @@ def add_connection( def _advance_startup_state(self) -> None: if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.CONNECTING: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="CONNECTING complete") component_labels: list[str] = self.get_component_labels(active=True) request_series: MCTRequestSeries = MCTRequestSeries(series=[TimeSyncStartRequest()]) @@ -194,7 +195,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.TIME_SYNC_START if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.TIME_SYNC_START: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="TIME_SYNC complete") component_labels: list[str] = self.get_component_labels(active=True) request_series: MCTRequestSeries = MCTRequestSeries(series=[ @@ -215,7 +216,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.TIME_SYNC_STOP if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.TIME_SYNC_STOP: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="STARTING_CAPTURE complete") detector_labels: list[str] = self.get_active_detector_labels() for detector_label in detector_labels: @@ -229,7 +230,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.GET_INTRINSICS if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.GET_INTRINSICS: self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="GET_INTRINSICS complete") if self._startup_mode == StartupMode.DETECTING_ONLY: self._startup_state = MCTController.StartupState.INITIAL @@ -244,7 +245,7 @@ def _advance_startup_state(self) -> None: connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") continue if detector_connection.current_intrinsic_parameters is not None: @@ -262,7 +263,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.SET_INTRINSICS if len(self._pending_request_ids) <= 0 and self._startup_state == MCTController.StartupState.SET_INTRINSICS: 
self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message="SET_INTRINSICS complete") self._startup_state = MCTController.StartupState.INITIAL self._status = MCTController.Status.RUNNING @@ -378,7 +379,7 @@ def handle_error_response( response: ErrorResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received error: {response.message}") def handle_response_calibration_result_get_active( @@ -391,17 +392,17 @@ def handle_response_calibration_result_get_active( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return if response.intrinsic_calibration is None: if detector_connection.current_resolution is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"No calibration was found for detector {detector_label}, and failed to get resolution.") return self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"No calibration was found for detector {detector_label}. " f"Zero parameters for active resolution {detector_connection.current_resolution} will be used.") detector_connection.current_intrinsic_parameters = IntrinsicParameters.generate_zero_parameters( @@ -420,7 +421,7 @@ def handle_response_camera_resolution_get( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return detector_connection.current_resolution = response.resolution @@ -435,7 +436,7 @@ def handle_response_detector_frame_get( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") return frame: DetectorFrame = response.frame @@ -454,7 +455,7 @@ def handle_response_get_poses( connection_type=PoseSolverConnection) if pose_solver_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find PoseSolverConnection with label {pose_solver_label}.") return pose_solver_connection.detector_poses = response.detector_poses @@ -481,11 +482,11 @@ def handle_response_timestamp_get( network_plus_offset_seconds: float = (responder_timestamp - requester_timestamp).total_seconds() connection.network_plus_offset_samples_seconds.append(network_plus_offset_seconds) if self._time_sync_sample_count >= _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: - connection.network_latency_seconds = numpy.median(connection.network_latency_samples_seconds) + connection.network_latency_seconds = float(numpy.median(connection.network_latency_samples_seconds)) connection.controller_offset_samples_seconds = [ network_plus_offset_sample_seconds - (connection.network_latency_seconds / 2.0) for network_plus_offset_sample_seconds in connection.network_plus_offset_samples_seconds] - connection.controller_offset_seconds = numpy.median(connection.controller_offset_samples_seconds) + connection.controller_offset_seconds = float(numpy.median(connection.controller_offset_samples_seconds)) 
print(f"Calculated offset to {connection.get_label()}: {connection.controller_offset_seconds}") def handle_response_unknown( @@ -493,7 +494,7 @@ def handle_response_unknown( response: MCTResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received unexpected response: {str(type(response))}") def handle_response_series( @@ -509,13 +510,13 @@ def handle_response_series( task_text = f" during {task_description}" if response_count < expected_response_count: self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Received a response series{task_text}, " f"but it contained fewer responses ({response_count}) " f"than expected ({expected_response_count}).") elif response_count > expected_response_count: self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Received a response series{task_text}, " f"but it contained more responses ({response_count}) " f"than expected ({expected_response_count}).") @@ -573,7 +574,7 @@ def recording_start( self._recording_save_path = save_path else: self.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Recording save path not defined") def recording_stop(self): @@ -751,7 +752,7 @@ def update( connection_type=DetectorConnection) if detector_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find DetectorConnection with label {detector_label}.") continue if detector_connection.request_id is not None: @@ -767,7 +768,7 @@ def update( connection_type=PoseSolverConnection) if pose_solver_connection is None: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to find PoseSolverConnection with label {pose_solver_label}.") continue if pose_solver_connection.request_id is not None: diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index e283da3..84ca601 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -16,14 +16,13 @@ Annotator, \ EmptyResponse, \ ErrorResponse, \ - NetworkUtils, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest import asyncio import base64 -from fastapi import FastAPI, Request +from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.websockets import WebSocket import hjson diff --git a/src/gui/gui.py b/src/gui/gui.py index 6e3e79b..6cefa10 100644 --- a/src/gui/gui.py +++ b/src/gui/gui.py @@ -5,7 +5,7 @@ ControllerPanel, \ DetectorPanel, \ PoseSolverPanel -from src.common import StatusMessageSource +from src.common import SeverityLabel, StatusMessageSource from src.controller import MCTController import logging import wx @@ -136,7 +136,7 @@ def update(self, *_args): self._controller.update() except Exception as e: self._controller.add_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Exception occurred in controller loop: {str(e)}") diff --git a/src/gui/panels/base_panel.py b/src/gui/panels/base_panel.py index e335342..74c7e79 100644 --- a/src/gui/panels/base_panel.py +++ b/src/gui/panels/base_panel.py @@ -8,6 +8,7 @@ from src.common import \ ErrorResponse, \ MCTResponse, \ + SeverityLabel, \ StatusMessageSource from src.common.structures import \ KeyValueSimpleAbstract, \ @@ -69,7 +70,7 @@ def 
populate_key_value_list_from_dynamic_ui( parameter_type = KeyValueSimpleInt else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Failed to determine parameter type from UI element for key {label}.") continue key_values.append(parameter_type( @@ -120,7 +121,7 @@ def populate_dynamic_ui_from_key_value_list( step_value=key_value.range_step)) else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Unsupported parameter type {key_value.parsable_type} will not be handled") return return_value @@ -129,7 +130,7 @@ def handle_error_response( response: ErrorResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received error: {response.message}") def handle_unknown_response( @@ -137,12 +138,12 @@ def handle_unknown_response( response: MCTResponse ): self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Received unexpected response: {str(type(response))}") def on_page_select(self): self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message=f"{self.GetName()} on_page_select") self.panel_is_selected = True if not self._update_loop_running: @@ -151,7 +152,7 @@ def on_page_select(self): def on_page_deselect(self): self.status_message_source.enqueue_status_message( - severity="debug", + severity=SeverityLabel.DEBUG, message=f"{self.GetName()} on_page_deselect") self.panel_is_selected = False diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index f61ed50..fc168e4 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -445,12 +445,12 @@ def _marker_snapshot_list_to_opencv_points( if len(marker_snapshot_list) <= 0: return numpy.asarray([], dtype=numpy.int32) return_value: list[list[list[(float, float)]]] = list() - current_base_label: str = marker_snapshot_list[0].base_label() + current_base_label: str = marker_snapshot_list[0].base_feature_label() current_shape_points: list[list[(float, float)]] = [[ marker_snapshot_list[0].x_px * scale, marker_snapshot_list[0].y_px * scale]] for marker_snapshot in marker_snapshot_list: - annotation_base_label = marker_snapshot.base_label() + annotation_base_label = marker_snapshot.base_feature_label() if annotation_base_label != current_base_label: return_value.append(current_shape_points) current_base_label = annotation_base_label diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 6cb64ce..e2bd900 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -407,12 +407,12 @@ def _marker_snapshot_list_to_opencv_points( if len(marker_snapshot_list) <= 0: return numpy.asarray([], dtype=numpy.int32) return_value: list[list[list[(float, float)]]] = list() - current_base_label: str = marker_snapshot_list[0].base_label() + current_base_label: str = marker_snapshot_list[0].base_feature_label() current_shape_points: list[list[(float, float)]] = [[ marker_snapshot_list[0].x_px * scale, marker_snapshot_list[0].y_px * scale]] for marker_snapshot in marker_snapshot_list: - annotation_base_label = marker_snapshot.base_label() + annotation_base_label = marker_snapshot.base_feature_label() if annotation_base_label != current_base_label: return_value.append(current_shape_points) current_base_label = annotation_base_label diff --git 
a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index 26a42fa..eae3022 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -11,6 +11,7 @@ EmptyResponse, \ MCTResponse, \ MCTResponseSeries, \ + SeverityLabel, \ StatusMessageSource from src.common.structures import \ DetectorFrame, \ @@ -184,7 +185,7 @@ def on_tracking_row_selected(self, _event: wx.grid.GridEvent) -> None: self._tracking_display_textbox.SetValue(display_text) else: self.status_message_source.enqueue_status_message( - severity="error", + severity=SeverityLabel.ERROR, message=f"Target index {selected_index} is out of bounds. Selection will be set to None.") self._tracking_table.set_selected_row_index(None) self._update_ui_controls() @@ -296,7 +297,7 @@ def _update_ui_controls(self) -> None: if tracked_target_index is not None: if tracked_target_index >= len(self._tracked_target_poses): self.status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Selected tracked target index {tracked_target_index} is out of bounds. " "Setting to None.") self._tracking_table.set_selected_row_index(None) diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py index d56b546..8ede806 100644 --- a/src/implementations/annotator_aruco_opencv.py +++ b/src/implementations/annotator_aruco_opencv.py @@ -2,12 +2,12 @@ from src.common import \ Annotator, \ MCTAnnotatorRuntimeError, \ + SeverityLabel, \ StatusMessageSource from src.common.structures import \ Annotation, \ KeyValueMetaAny, \ - KeyValueSimpleAny, \ - RELATION_CHARACTER + KeyValueSimpleAny import cv2.aruco import datetime import logging @@ -76,7 +76,9 @@ def update( ) -> None: if self._aruco_dictionary is None: message: str = "No ArUco dictionary has been set." 
- self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) self.set_status(Annotator.Status.FAILURE) return @@ -95,11 +97,11 @@ def update( detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) for detected_index, detected_id in enumerate(detected_dictionary_indices): for corner_index in range(4): - detected_label: str = f"{detected_id}{RELATION_CHARACTER}{corner_index}" + detected_label: str = f"{detected_id}{Annotation.RELATION_CHARACTER}{corner_index}" self._snapshots_identified.append(Annotation( - label=detected_label, - x_px=detected_corner_points_px[detected_index][corner_index][0], - y_px=detected_corner_points_px[detected_index][corner_index][1])) + feature_label=detected_label, + x_px=float(detected_corner_points_px[detected_index][corner_index][0]), + y_px=float(detected_corner_points_px[detected_index][corner_index][1]))) self._snapshots_unidentified = list() if rejected_corner_points_raw: @@ -107,8 +109,8 @@ def update( for rejected_index in range(rejected_corner_points_px.shape[0]): for corner_index in range(4): self._snapshots_unidentified.append(Annotation( - label=Annotation.UNIDENTIFIED_LABEL, - x_px=rejected_corner_points_px[rejected_index][corner_index][0], - y_px=rejected_corner_points_px[rejected_index][corner_index][1])) + feature_label=Annotation.UNIDENTIFIED_LABEL, + x_px=float(rejected_corner_points_px[rejected_index][corner_index][0]), + y_px=float(rejected_corner_points_px[rejected_index][corner_index][1]))) self._update_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py index ad535af..734952f 100644 --- a/src/implementations/common_aruco_opencv.py +++ b/src/implementations/common_aruco_opencv.py @@ -1,3 +1,4 @@ +from src.common import MathUtils from src.common.structures import \ KeyValueMetaAny, \ KeyValueMetaBool, \ @@ -10,7 +11,9 @@ KeyValueSimpleFloat, \ KeyValueSimpleInt, \ KeyValueSimpleString, \ - MCTSerializationError + Landmark, \ + MCTSerializationError, \ + Target import cv2.aruco import logging import numpy @@ -343,7 +346,7 @@ def assign_aruco_detection_parameters_to_key_value_list( return_value.append(KeyValueMetaEnum( key=_KEY_CORNER_REFINEMENT_METHOD, value=corner_refinement_method_text, - allowable_values=get_args(ArucoOpenCVCommon.CornerRefinementMethod))) + allowable_values=list(get_args(ArucoOpenCVCommon.CornerRefinementMethod)))) return_value.append(KeyValueMetaInt( key=_KEY_CORNER_REFINEMENT_WIN_SIZE, @@ -617,3 +620,17 @@ def assign_key_value_list_to_aruco_detection_parameters( else: mismatched_keys.append(key_value.key) return mismatched_keys + + @staticmethod + def target_from_marker_parameters( + base_label : str, + marker_size: float + ) -> Target: + corner_points: list[list[float]] = MathUtils.square_marker_corner_points(marker_size=marker_size) + landmarks: list[Landmark] = [ + Landmark( + feature_label=f"{base_label}{Landmark.RELATION_CHARACTER}{corner_index}", + x=corner_point[0], y=corner_point[1], z=corner_point[2]) + for corner_index, corner_point in enumerate(corner_points)] + target: Target = Target(label=base_label, landmarks=landmarks) + return target diff --git a/src/pose_solver/__init__.py b/src/pose_solver/__init__.py index efacf24..d7e596a 100644 --- a/src/pose_solver/__init__.py +++ b/src/pose_solver/__init__.py @@ -1,7 +1,6 @@ from .api import \ PoseSolverAddDetectorFrameRequest, \ - 
PoseSolverAddTargetMarkerRequest, \ - PoseSolverAddTargetBoardRequest, \ + PoseSolverAddTargetRequest, \ PoseSolverAddTargetResponse, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ diff --git a/src/pose_solver/api.py b/src/pose_solver/api.py index a8ef348..784d96f 100644 --- a/src/pose_solver/api.py +++ b/src/pose_solver/api.py @@ -6,10 +6,9 @@ IntrinsicParameters, \ Matrix4x4, \ Pose, \ - TargetBoard, \ - TargetMarker + Target from pydantic import Field -from typing import Final, Literal, Union +from typing import Final, Literal class PoseSolverAddDetectorFrameRequest(MCTRequest): @@ -26,34 +25,21 @@ def type_identifier() -> str: detector_frame: DetectorFrame = Field() -class PoseSolverAddTargetMarkerRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_target_marker" +class PoseSolverAddTargetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "add_target" @staticmethod def type_identifier() -> str: - return PoseSolverAddTargetMarkerRequest._TYPE_IDENTIFIER + return PoseSolverAddTargetRequest._TYPE_IDENTIFIER # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - target: TargetMarker = Field() - - -class PoseSolverAddTargetBoardRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_target_board" - - @staticmethod - def type_identifier() -> str: - return PoseSolverAddTargetBoardRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target: TargetBoard = Field() + target: Target = Field() class PoseSolverAddTargetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" + _TYPE_IDENTIFIER: Final[str] = "add_target" @staticmethod def type_identifier() -> str: @@ -142,7 +128,7 @@ def type_identifier() -> str: # noinspection PyTypeHints parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - targets: list[Union[TargetMarker, TargetBoard]] = Field() + targets: list[Target] = Field() class PoseSolverStartRequest(MCTRequest): diff --git a/src/pose_solver/exceptions.py b/src/pose_solver/exceptions.py index f921c5a..7cac3f8 100644 --- a/src/pose_solver/exceptions.py +++ b/src/pose_solver/exceptions.py @@ -1,5 +1,9 @@ -class PoseSolverException(Exception): +from src.common import MCTError + + +class PoseSolverException(MCTError): message: str def __init__(self, message: str, *args, **kwargs): + super().__init__(args, kwargs) self.message = message diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 72d09c3..9e8a00f 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -2,17 +2,17 @@ PoseSolverException from .structures import \ DetectorRecord, \ - DetectorFrameRecord, \ PoseSolverParameters from src.common import MathUtils from src.common.structures import \ + Annotation, \ DetectorFrame, \ IntrinsicParameters, \ IterativeClosestPointParameters, \ Matrix4x4, \ Pose, \ Ray, \ - TargetBase + Target import cv2 import cv2.aruco import datetime @@ -42,17 +42,17 @@ class PoseSolver: _parameters: PoseSolverParameters _intrinsics_by_detector_label: dict[str, IntrinsicParameters] _extrinsics_by_detector_label: dict[str, Matrix4x4] - _targets: list[TargetBase] # First target is considered the "reference" + _targets: list[Target] # First target is considered the "reference" # input per frame _detector_records_by_detector_label: dict[str, DetectorRecord] # internal threshold _minimum_marker_age_before_removal_seconds: float # use this to make sure each 
marker is associated uniquely to a single target - _marker_target_map: dict[str, TargetBase] # Each marker shall be used at most once by a single target + _landmark_target_map: dict[str, Target] # Each marker shall be used at most once by a single target # outputs - _poses_by_target_id: dict[str, Matrix4x4] + _poses_by_target_label: dict[str, Matrix4x4] _poses_by_detector_label: dict[str, Matrix4x4] def __init__( @@ -73,9 +73,9 @@ def __init__( self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS, self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) - self._marker_target_map = dict() + self._landmark_target_map = dict() - self._poses_by_target_id = dict() + self._poses_by_target_label = dict() self._poses_by_detector_label = dict() def add_detector_frame( @@ -83,34 +83,31 @@ def add_detector_frame( detector_label: str, detector_frame: DetectorFrame ) -> None: - detector_frame_record: DetectorFrameRecord = DetectorFrameRecord( - detector_label=detector_label, - frame=detector_frame) if detector_label not in self._detector_records_by_detector_label: self._detector_records_by_detector_label[detector_label] = DetectorRecord() self._detector_records_by_detector_label[detector_label].clear_frame_records() - self._detector_records_by_detector_label[detector_label].add_frame_record(detector_frame_record) + self._detector_records_by_detector_label[detector_label].add_frame_record(detector_frame) self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def add_target( self, - target: TargetBase + target: Target ) -> None: for existing_target in self._targets: if target.label == existing_target.label: raise PoseSolverException( f"Target with name {target.label} is already registered. 
" f"Please use a different name, and also make sure you are not adding the same target twice.") - marker_ids = target.get_marker_ids() - for marker_id in marker_ids: - if marker_id in self._marker_target_map: - target_id: str = self._marker_target_map[marker_id].label + landmark_labels: list[str] = [landmark.feature_label for landmark in target.landmarks] + for landmark_label in landmark_labels: + if landmark_label in self._landmark_target_map: + target_id: str = self._landmark_target_map[landmark_label].label raise PoseSolverException( - f"Marker {marker_id} is already used with target {target_id} and it cannot be reused.") + f"Landmark {landmark_label} is already used with target {target_id} and it cannot be reused.") target_index = len(self._targets) self._targets.append(target) - for marker_id in marker_ids: - self._marker_target_map[marker_id] = self._targets[target_index] + for landmark_label in landmark_labels: + self._landmark_target_map[landmark_label] = self._targets[target_index] self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def clear_extrinsic_matrices(self): @@ -123,7 +120,7 @@ def clear_intrinsic_parameters(self): def clear_targets(self): self._targets.clear() - self._marker_target_map.clear() + self._landmark_target_map.clear() self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def get_poses( @@ -143,10 +140,10 @@ def get_poses( target_id=str(target_id), object_to_reference_matrix=pose, solver_timestamp_utc_iso8601=self._last_updated_timestamp_utc.isoformat()) - for target_id, pose in self._poses_by_target_id.items()] + for target_id, pose in self._poses_by_target_label.items()] return detector_poses, target_poses - def list_targets(self) -> list[TargetBase]: + def list_targets(self) -> list[Target]: return self._targets def set_extrinsic_matrix( @@ -181,10 +178,10 @@ def set_reference_target( def set_targets( self, - targets: list[TargetBase] + targets: list[Target] ) -> None: self._targets = targets - self._poses_by_target_id.clear() + self._poses_by_target_label.clear() self._poses_by_detector_label.clear() self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) @@ -225,7 +222,7 @@ def _calculate_reprojection_error_for_pose( for point_index, image_point in enumerate(ray_set.image_points): reprojection_error_for_point = \ numpy.linalg.norm(projected_points[point_index, 0, 0:2] - image_point) - sum_reprojection_errors_squared += reprojection_error_for_point ** 2 + sum_reprojection_errors_squared += float(reprojection_error_for_point) ** 2 mean_reprojection_errors_squared: float = sum_reprojection_errors_squared / len(object_points_target) rms_reprojection_error = numpy.sqrt(mean_reprojection_errors_squared) return rms_reprojection_error @@ -321,142 +318,139 @@ def update(self) -> None: self._last_updated_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) self._poses_by_detector_label.clear() - self._poses_by_target_id.clear() + self._poses_by_target_label.clear() - corners: dict[str, dict[str, list[list[float]]]] # [detector_label][marker_id][point_index][x/y] - corners = { - detector_label: detector_record.get_corners() + annotation_list_by_detector_label: dict[str, list[Annotation]] + annotation_list_by_detector_label = { + detector_label: detector_record.get_annotations(deep_copy=True) for detector_label, detector_record in self._detector_records_by_detector_label.items()} detector_labels: list[str] = list(self._detector_records_by_detector_label.keys()) for 
detector_label in detector_labels: if detector_label not in self._intrinsics_by_detector_label: # TODO: Output a suitable warning that no intrinsics have been received, but don't do it every frame - del corners[detector_label] + del annotation_list_by_detector_label[detector_label] - reference_target: TargetBase = self._targets[0] + reference_target: Target = self._targets[0] for detector_label in detector_labels: if detector_label in self._extrinsics_by_detector_label: self._poses_by_detector_label[detector_label] = self._extrinsics_by_detector_label[detector_label] else: intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] reference_to_detector: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotation_list_by_detector_label[detector_label], target=reference_target, - corners_by_marker_id=corners[detector_label], detector_intrinsics=intrinsics) detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( numpy.linalg.inv(reference_to_detector.as_numpy_array())) self._poses_by_detector_label[detector_label] = detector_to_reference - # At the time of writing, each marker_id can be used only once. - # So we can remove marker_ids used by the reference_target to avoid unnecessary processing. + # At the time of writing, each feature label can be used only by one target. + # So we can remove annotations whose feature labels match those of the reference_target + # to avoid unnecessary processing. + reference_feature_labels: set[str] = set([landmark.feature_label for landmark in reference_target.landmarks]) for detector_label in detector_labels: - for marker_id in reference_target.get_marker_ids(): - corners[detector_label].pop(marker_id) - - rays: dict[str, list[list[Ray]]] = dict() # indexed as [marker_id][detector_index][corner_index] - detector_labels_by_marker_id: dict[str, list[str]] = dict() + indices_to_remove: list[int] = list() + for annotation_index, annotation in enumerate(annotation_list_by_detector_label[detector_label]): + if annotation.feature_label in reference_feature_labels: + indices_to_remove.append(annotation_index) + for annotation_index in reversed(indices_to_remove): + annotation_list_by_detector_label[detector_label].pop(annotation_index) + + # Convert annotations to rays + rays_by_feature_and_detector: dict[str, dict[str, Ray]] = dict() # indexed as [feature_label][detector_label] for detector_label in detector_labels: + annotations: list[Annotation] = annotation_list_by_detector_label[detector_label] + annotation_points: list[list[float]] = [[annotation.x_px, annotation.y_px] for annotation in annotations] detector_to_reference: Matrix4x4 = self._poses_by_detector_label[detector_label] intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] ray_origin: list[float] = detector_to_reference.get_translation() - ray_directions_by_marker_id: dict[str, list[list[float]]] # [marker_id][point_index][x/y/z] - ray_directions_by_marker_id = MathUtils.convert_detector_corners_to_vectors( - corners_by_marker_id=corners[detector_label], + ray_directions = MathUtils.convert_detector_points_to_vectors( + points=annotation_points, detector_intrinsics=intrinsics, detector_to_reference_matrix=detector_to_reference) - for marker_id, ray_directions in ray_directions_by_marker_id.items(): - if marker_id not in rays: - rays[marker_id] = list() - rays[marker_id].append([ - Ray(source_point=ray_origin, direction=ray_directions_by_marker_id[marker_id][corner_index]) - for corner_index in range(0, 
_CORNER_COUNT)]) - if marker_id not in detector_labels_by_marker_id: - detector_labels_by_marker_id[marker_id] = list() - detector_labels_by_marker_id[marker_id].append(detector_label) - - # intersect rays to find the 3D points for each marker corner in reference coordinates - intersections_by_marker_id: dict[str, list[list[float]]] = dict() # [marker_id][corner_index][x/y/z] - standalone_rays_marker_ids: list[str] = list() - for marker_id, rays_by_detector_index in rays.items(): - ray_list_by_corner_index: list[list[Ray]] = [[ - rays[marker_id][detector_index][corner_index] - for detector_index in range(0, len(rays[marker_id]))] - for corner_index in range(0, _CORNER_COUNT)] - intersections_appear_valid: bool = True # If something looks off, set this to False - corners_reference_by_corner_index: list[list[float]] = list() - for corner_index in range(0, _CORNER_COUNT): - intersection_result = MathUtils.closest_intersection_between_n_lines( - rays=ray_list_by_corner_index[corner_index], - maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) - if intersection_result.centroids.shape[0] == 0: - intersections_appear_valid = False - break - corners_reference_by_corner_index.append(list(intersection_result.centroid().flatten())) - if not intersections_appear_valid: - standalone_rays_marker_ids.append(marker_id) - continue - intersections_by_marker_id[marker_id] = corners_reference_by_corner_index + feature_labels: list[str] = [annotation.feature_label for annotation in annotations] + # note: annotation_labels and ray_directions have a 1:1 correspondence by index + assert len(ray_directions) == len(feature_labels) + for feature_index, feature_label in enumerate(feature_labels): + if feature_label not in rays_by_feature_and_detector: + rays_by_feature_and_detector[feature_label] = dict() + rays_by_feature_and_detector[feature_label][detector_label] = Ray( + source_point=ray_origin, + direction=ray_directions[feature_index]) + + # intersect rays to find the 3D points for each feature, in reference coordinates + # If intersection is not possible, then still note that rays exist via standalone_ray_feature_labels + intersections_by_feature_label: dict[str, list[float]] = dict() # [feature_label][dimension_index] + feature_labels_with_rays_only: list[str] = list() + for feature_label, rays_by_detector_label in rays_by_feature_and_detector.items(): + intersection_result = MathUtils.closest_intersection_between_n_lines( + rays=list(rays_by_detector_label.values()), + maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) + if intersection_result.centroids.shape[0] == 0: + feature_labels_with_rays_only.append(feature_label) + break + intersections_by_feature_label[feature_label] = list(intersection_result.centroid().flatten()) # We estimate the pose of each target based on the calculated intersections # and the rays projected from each detector for target in self._targets: if target.label == str(reference_target.label): - continue # everything is expressed relative to the reference... 
- detected_marker_ids_in_target: list[str] = target.get_marker_ids() - - marker_ids_with_intersections: list[str] = list() - marker_ids_with_rays: list[str] = list() - detector_labels: set[str] = set() - for marker_id in detected_marker_ids_in_target: - if marker_id in intersections_by_marker_id: - marker_ids_with_intersections.append(marker_id) - if marker_id in standalone_rays_marker_ids: - marker_ids_with_rays.append(marker_id) - if marker_id in detector_labels_by_marker_id: - for detector_label in detector_labels_by_marker_id[marker_id]: - detector_labels.add(detector_label) - - if len(marker_ids_with_intersections) <= 0 and len(marker_ids_with_rays) <= 0: + continue # everything is expressed relative to the reference, so it's a "known" coordinate system + feature_labels_in_target: list[str] = [landmark.feature_label for landmark in target.landmarks] + + target_feature_labels_with_intersections: list[str] = list() + target_feature_labels_with_rays: list[str] = list() + detector_labels_seeing_target: set[str] = set() + for target_feature_label in feature_labels_in_target: + if target_feature_label in intersections_by_feature_label: + target_feature_labels_with_intersections.append(target_feature_label) + if target_feature_label in feature_labels_with_rays_only: + target_feature_labels_with_rays.append(target_feature_label) + detector_labels_seeing_target |= set(rays_by_feature_and_detector[target_feature_label].keys()) + + if len(target_feature_labels_with_intersections) <= 0 and len(target_feature_labels_with_rays) <= 0: continue # No information on which to base a pose - if len(detector_labels) < self._parameters.minimum_detector_count: + detector_count_seeing_target: int = len(detector_labels_seeing_target) + if detector_count_seeing_target < self._parameters.minimum_detector_count or \ + detector_count_seeing_target <= 0: continue - # NB. 
len() == 0 or less for either of these indicates an internal error - one_detector_only: bool = (len(detector_labels) == 1) + one_detector_only: bool = (len(detector_labels_seeing_target) == 1) if one_detector_only: # Note: there cannot be any intersections in this case - detector_label: str = next(iter(detector_labels)) + detector_label: str = next(iter(detector_labels_seeing_target)) intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] detected_to_detector_matrix4x4: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotation_list_by_detector_label[detector_label], target=target, - corners_by_marker_id=corners[detector_label], detector_intrinsics=intrinsics) detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector - self._poses_by_target_id[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) + self._poses_by_target_label[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) else: # Fill in the required variables for the customized iterative closest point - detected_known_points: list[list[float]] = list(itertools.chain.from_iterable([ - target.get_points_for_marker_id(marker_id) - for marker_id in marker_ids_with_intersections])) - reference_known_points: list[list[float]] = list(itertools.chain.from_iterable([ - intersections_by_marker_id[marker_id] - for marker_id in marker_ids_with_intersections])) - detected_ray_points: list[list[float]] = list(itertools.chain.from_iterable([ - target.get_points_for_marker_id(marker_id) - for marker_id in marker_ids_with_rays])) + detected_known_points: list[list[float]] = [ + target.get_landmark_point(feature_label) + for feature_label in target_feature_labels_with_intersections] + reference_known_points: list[list[float]] = [ + intersections_by_feature_label[feature_label] + for feature_label in target_feature_labels_with_intersections] + detected_ray_points: list[list[float]] = [ + target.get_landmark_point(feature_label) + for feature_label in target_feature_labels_with_rays] reference_rays: list[Ray] = list(itertools.chain.from_iterable([ - rays[marker_id] for marker_id in marker_ids_with_rays])) + list(rays_by_feature_and_detector[feature_label].values()) + for feature_label in target_feature_labels_with_rays])) iterative_closest_point_parameters = IterativeClosestPointParameters( termination_iteration_count=self._parameters.icp_termination_iteration_count, termination_delta_translation=self._parameters.icp_termination_translation, termination_delta_rotation_radians=self._parameters.icp_termination_rotation_radians, termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) - if len(marker_ids_with_intersections) >= 1: + if len(target_feature_labels_with_intersections) >= 1: initial_detected_to_reference_matrix = MathUtils.register_corresponding_points( point_set_from=detected_known_points, point_set_to=reference_known_points, @@ -475,4 +469,4 @@ def update(self) -> None: source_ray_points=detected_ray_points, target_rays=reference_rays, parameters=iterative_closest_point_parameters) - self._poses_by_target_id[target.label] = icp_output.source_to_target_matrix + self._poses_by_target_label[target.label] = icp_output.source_to_target_matrix 
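The refactor above folds TargetMarker and TargetBoard into a single Target made of labelled Landmarks, and keys the solver's bookkeeping by feature label instead of marker id. A minimal end-to-end sketch of the resulting API, assembled only from calls visible in this patch series: import paths follow this patch (later patches in the series relocate some of these symbols into src.common), the PoseSolver constructor arguments are not visible in this excerpt, and intrinsic parameters still have to be supplied to the solver (step hinted at in a comment) before poses can actually be computed.

from src.common.structures import Annotation, DetectorFrame, ImageResolution, Target
from src.implementations.common_aruco_opencv import ArucoOpenCVCommon
from src.pose_solver.pose_solver import PoseSolver
import datetime

# A square marker becomes a Target whose four landmarks are labelled
# "<base_label><RELATION_CHARACTER><corner_index>".
reference: Target = ArucoOpenCVCommon.target_from_marker_parameters(
    base_label="0", marker_size=10.0)

solver = PoseSolver()                # constructor arguments, if any, are not shown in this patch
solver.add_target(target=reference)  # the first registered target defines the reference frame
# ... intrinsics for "det_red" would be registered with the solver here ...

# One frame of detections for that marker, expressed as per-corner annotations.
now_utc = datetime.datetime.now(tz=datetime.timezone.utc)
corner_points_px = [(375.0, 347.0), (415.0, 346.0), (416.0, 386.0), (376.0, 386.0)]
solver.add_detector_frame(
    detector_label="det_red",
    detector_frame=DetectorFrame(
        annotations=[
            Annotation(
                feature_label=f"0{Annotation.RELATION_CHARACTER}{corner_index}",
                x_px=x_px, y_px=y_px)
            for corner_index, (x_px, y_px) in enumerate(corner_points_px)],
        image_resolution=ImageResolution(x_px=640, y_px=480),
        timestamp_utc_iso8601=now_utc.isoformat()))

solver.update()
detector_poses, target_poses = solver.get_poses()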
diff --git a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py index 0f3ae3e..b450747 100644 --- a/src/pose_solver/pose_solver_api.py +++ b/src/pose_solver/pose_solver_api.py @@ -1,6 +1,6 @@ from .api import \ PoseSolverAddDetectorFrameRequest, \ - PoseSolverAddTargetMarkerRequest, \ + PoseSolverAddTargetRequest, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ PoseSolverSetExtrinsicRequest, \ @@ -40,9 +40,9 @@ class PoseSolverAPI(MCTComponent): class Status: class Solve(StrEnum): - STOPPED: Final[int] = "stopped" - RUNNING: Final[int] = "running" - FAILURE: Final[int] = "failure" + STOPPED = "stopped" + RUNNING = "running" + FAILURE = "failure" solve_status: Solve solve_errors: list[str] @@ -79,10 +79,10 @@ def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: return EmptyResponse() def add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddTargetMarkerRequest = self.get_kwarg( + request: PoseSolverAddTargetRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=PoseSolverAddTargetMarkerRequest) + arg_type=PoseSolverAddTargetRequest) try: self._pose_solver.add_target(target=request.target) except PoseSolverException as e: @@ -164,7 +164,7 @@ def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCT return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() return_value.update({ PoseSolverAddDetectorFrameRequest: self.add_detector_frame, - PoseSolverAddTargetMarkerRequest: self.add_target, + PoseSolverAddTargetRequest: self.add_target, PoseSolverGetPosesRequest: self.get_poses, PoseSolverSetExtrinsicRequest: self.set_extrinsic_matrix, PoseSolverSetIntrinsicRequest: self.set_intrinsic_parameters, diff --git a/src/pose_solver/pose_solver_app.py b/src/pose_solver/pose_solver_app.py index d4009b2..36d0d9d 100644 --- a/src/pose_solver/pose_solver_app.py +++ b/src/pose_solver/pose_solver_app.py @@ -1,6 +1,6 @@ from .api import \ PoseSolverAddDetectorFrameRequest, \ - PoseSolverAddTargetMarkerRequest, \ + PoseSolverAddTargetRequest, \ PoseSolverGetPosesResponse, \ PoseSolverSetIntrinsicRequest from .pose_solver import \ @@ -55,7 +55,7 @@ async def add_marker_corners( @pose_solver_app.post("/add_target") async def add_target_marker( - request: PoseSolverAddTargetMarkerRequest + request: PoseSolverAddTargetRequest ) -> EmptyResponse | ErrorResponse: return pose_solver_api.add_target(request=request) diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py index 3032f92..55033fe 100644 --- a/src/pose_solver/structures.py +++ b/src/pose_solver/structures.py @@ -6,79 +6,47 @@ from pydantic import BaseModel, Field -class DetectorFrameRecord: - _detector_label: str - _frame: DetectorFrame - _timestamp_utc: datetime.datetime | None - _corners_by_marker_id: dict[str, list[list[float]]] | None - - def __init__( - self, - detector_label: str, - frame: DetectorFrame - ): - self._detector_label = detector_label - self._frame = frame - self._timestamp_utc = None # calculated when needed - self._corners_by_marker_id = None - - def _init_corners_by_marker_id(self): - self._corners_by_marker_id = dict() - annotations: list[Annotation] = self._frame.annotations_identified - for annotation in annotations: - base_label: str = annotation.base_label() - if base_label in self._corners_by_marker_id.keys(): - continue - self._corners_by_marker_id[base_label] = [ - [annotation.x_px, annotation.y_px] - for annotation in annotations - if 
annotation.base_label() == base_label] - - def get_detector_label(self) -> str: - return self._detector_label - - def get_frame(self) -> DetectorFrame: - return self._frame - - def get_marker_corners_by_marker_id( - self, - marker_id: str - ) -> list[list[float]] | None: - if self._corners_by_marker_id is None: - self._init_corners_by_marker_id() - if marker_id in list(self._corners_by_marker_id.keys()): - return self._corners_by_marker_id[marker_id] - return None - - def get_marker_ids_detected(self) -> list[str]: - if self._corners_by_marker_id is None: - self._init_corners_by_marker_id() - return list(self._corners_by_marker_id.keys()) - - def get_timestamp_utc(self): - if self._timestamp_utc is None: - self._timestamp_utc = self._frame.timestamp_utc - return self._timestamp_utc +class DetectorRecord: + """ + Class whose purpose is to keep track of the latest position of each landmark (in annotation form) + for a single detector. + """ + class TimestampedAnnotation: + annotation: Annotation + timestamp_utc: datetime.datetime + def __init__( + self, + annotation: Annotation, + timestamp_utc: datetime.datetime + ): + self.annotation = annotation + self.timestamp_utc = timestamp_utc -class DetectorRecord: - _frame_records_by_marker_id: dict[str, DetectorFrameRecord] = Field(default_factory=dict) + _timestamped_annotations: dict[str, TimestampedAnnotation] def __init__(self): - self._frame_records_by_marker_id = dict() + self._timestamped_annotations = dict() def add_frame_record( self, - frame_record: DetectorFrameRecord + frame: DetectorFrame ) -> None: - marker_ids: list[str] = frame_record.get_marker_ids_detected() - for marker_id in marker_ids: - if marker_id not in self._frame_records_by_marker_id or \ - frame_record.get_timestamp_utc() > self._frame_records_by_marker_id[marker_id].get_timestamp_utc(): - self._frame_records_by_marker_id[marker_id] = frame_record + for annotation in frame.annotations: + if annotation.feature_label not in self._timestamped_annotations: + self._timestamped_annotations[annotation.feature_label] = DetectorRecord.TimestampedAnnotation( + annotation=annotation, + timestamp_utc=frame.timestamp_utc) + continue + timestamped_annotation: DetectorRecord.TimestampedAnnotation = \ + self._timestamped_annotations[annotation.feature_label] + if frame.timestamp_utc > timestamped_annotation.timestamp_utc: + self._timestamped_annotations[annotation.feature_label] = DetectorRecord.TimestampedAnnotation( + annotation=annotation, + timestamp_utc=frame.timestamp_utc) def clear_frame_records(self): - self._frame_records_by_marker_id.clear() + self._timestamped_annotations.clear() def clear_frame_records_older_than( self, @@ -87,30 +55,23 @@ def clear_frame_records_older_than( """ returns True if any changes were made """ - return_value: bool = False - marker_ids: list[str] = list(self._frame_records_by_marker_id.keys()) - for marker_id in marker_ids: - frame_record: DetectorFrameRecord = self._frame_records_by_marker_id[marker_id] - if frame_record.get_timestamp_utc() < timestamp_utc: - del self._frame_records_by_marker_id[marker_id] - return_value = True - return return_value - - def get_corners( - self - ) -> dict[str, list[list[float]]]: # [marker_id][point_index][x/y/z] - corners_by_marker_id: dict[str, list[list[float]]] = dict() - for marker_id, frame_record in self._frame_records_by_marker_id.items(): - corners_by_marker_id[marker_id] = frame_record.get_marker_corners_by_marker_id(marker_id=marker_id) - return corners_by_marker_id - - def get_corners_for_marker_id( + 
feature_labels_to_remove: list[str] = list() + for entry in self._timestamped_annotations.values(): + if entry.timestamp_utc < timestamp_utc: + feature_labels_to_remove.append(entry.annotation.feature_label) + if len(feature_labels_to_remove) <= 0: + return False + for feature_label in feature_labels_to_remove: + del self._timestamped_annotations[feature_label] + return True + + def get_annotations( self, - marker_id: str - ) -> list[list[float]] | None: # [point_index][x/y/z] - if marker_id not in self._frame_records_by_marker_id: - return None - return self._frame_records_by_marker_id[marker_id].get_marker_corners_by_marker_id(marker_id=marker_id) + deep_copy: bool = True + ) -> list[Annotation]: + if deep_copy: + return [entry.annotation.model_copy() for entry in self._timestamped_annotations.values()] + return [entry.annotation for entry in self._timestamped_annotations.values()] class PoseSolverConfiguration(BaseModel): diff --git a/src/util/generate_target_definition_from_charuco.py b/src/util/generate_target_definition_from_charuco.py index 8815921..578311a 100644 --- a/src/util/generate_target_definition_from_charuco.py +++ b/src/util/generate_target_definition_from_charuco.py @@ -1,22 +1,21 @@ from src.implementations.common_aruco_opencv import ArucoOpenCVCommon -from src.common.structures import \ - TargetBoard -from src.common.structures import Marker +from src.common.structures import Annotation, Landmark, Target board: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() points: list[list[float]] = board.get_marker_corner_points() -markers: list[Marker] = list() +landmarks: list[Landmark] = list() POINTS_PER_MARKER: int = 4 marker_count: int = round(int(len(points) / POINTS_PER_MARKER)) for marker_index in range(marker_count): point_start_index: int = marker_index * POINTS_PER_MARKER marker_points = points[point_start_index: point_start_index + POINTS_PER_MARKER] - markers.append(Marker( - marker_id=f"{marker_index}", - points=marker_points)) -target: TargetBoard = TargetBoard( - target_id="reference", - markers=markers) + for corner_index, corner_point in enumerate(marker_points): + landmarks.append(Landmark( + feature_label=f"{marker_index}{Annotation.RELATION_CHARACTER}{corner_index}", + x=corner_point[0], y=corner_point[1], z=corner_point[2])) +target: Target = Target( + label="CharucoBoard", + landmarks=landmarks) with open("temp.json", 'w') as outfile: - outfile.write(target.json(exclude_unset=True, indent=2)) + outfile.write(target.model_dump_json(exclude_unset=True, indent=2)) diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index f39d777..04c0162 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -1,31 +1,30 @@ from src.pose_solver.pose_solver import PoseSolver from src.common.structures import \ + Annotation, \ DetectorFrame, \ ImageResolution, \ IntrinsicParameters, \ - Annotation, \ Matrix4x4, \ Pose, \ - RELATION_CHARACTER, \ - TargetMarker + Target +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon import datetime from typing import Final import unittest IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) +REL_CHAR: Final[str] = Annotation.RELATION_CHARACTER # For brevity MARKER_SIZE_MM: Final[float] = 10.0 REFERENCE_TARGET_ID: Final[str] = "reference" REFERENCE_MARKER_ID: Final[str] = "0" -REFERENCE_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - label=REFERENCE_TARGET_ID, - marker_id=REFERENCE_MARKER_ID, +REFERENCE_MARKER_TARGET: Final[Target] = 
ArucoOpenCVCommon.target_from_marker_parameters( + base_label=REFERENCE_MARKER_ID, marker_size=MARKER_SIZE_MM) TARGET_TARGET_ID: Final[str] = "target" TARGET_MARKER_ID: Final[str] = "1" -TARGET_MARKER_TARGET: Final[TargetMarker] = TargetMarker( - label=TARGET_TARGET_ID, - marker_id=TARGET_MARKER_ID, +TARGET_MARKER_TARGET: Final[Target] = ArucoOpenCVCommon.target_from_marker_parameters( + base_label=TARGET_MARKER_ID, marker_size=MARKER_SIZE_MM) DETECTOR_RED_NAME: Final[str] = "det_red" DETECTOR_RED_INTRINSICS: Final[IntrinsicParameters] = IntrinsicParameters( @@ -115,14 +114,14 @@ def test_single_camera_viewing_target_marker(self): detector_label=DETECTOR_RED_NAME, detector_frame=DetectorFrame( annotations=[ - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=375, y_px=347), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=415, y_px=346), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=416, y_px=386), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=376, y_px=386), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=541, y_px=347), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=581, y_px=348), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=580, y_px=388), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=540, y_px=387)], + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=375, y_px=347), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=415, y_px=346), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=416, y_px=386), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=376, y_px=386), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=541, y_px=347), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=581, y_px=348), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=580, y_px=388), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=540, y_px=387)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.update() @@ -179,56 +178,56 @@ def test_four_cameras_viewing_target_marker(self): detector_label=DETECTOR_RED_NAME, detector_frame=DetectorFrame( annotations=[ - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=157, y_px=210), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=165, y_px=221), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=139, y_px=229), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=131, y_px=217), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=196, y_px=266), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=206, y_px=281), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=178, y_px=291), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=167, y_px=275)], + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=157, y_px=210), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=165, y_px=221), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=139, y_px=229), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=131, y_px=217), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", 
x_px=196, y_px=266), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=206, y_px=281), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=178, y_px=291), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=167, y_px=275)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_SKY_NAME, detector_frame=DetectorFrame( annotations=[ - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=190, y_px=234), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=219, y_px=246), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=195, y_px=270), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=166, y_px=257), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=317, y_px=290), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=352, y_px=306), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=332, y_px=333), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=296, y_px=317)], + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=190, y_px=234), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=219, y_px=246), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=195, y_px=270), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=166, y_px=257), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=317, y_px=290), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=352, y_px=306), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=332, y_px=333), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=296, y_px=317)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_GREEN_NAME, detector_frame=DetectorFrame( annotations=[ - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=247, y_px=304), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=283, y_px=296), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=291, y_px=326), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=254, y_px=334), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=392, y_px=277), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=426, y_px=271), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=438, y_px=299), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=403, y_px=305)], + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=247, y_px=304), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=283, y_px=296), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=291, y_px=326), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=254, y_px=334), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=392, y_px=277), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=426, y_px=271), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=438, y_px=299), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=403, y_px=305)], 
image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.add_detector_frame( detector_label=DETECTOR_YELLOW_NAME, detector_frame=DetectorFrame( annotations=[ - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}0", x_px=275, y_px=277), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}1", x_px=289, y_px=251), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}2", x_px=321, y_px=261), - Annotation(label=f"{str(REFERENCE_MARKER_ID)}{RELATION_CHARACTER}3", x_px=306, y_px=288), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}0", x_px=332, y_px=177), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}1", x_px=344, y_px=156), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}2", x_px=372, y_px=163), - Annotation(label=f"{str(TARGET_MARKER_ID)}{RELATION_CHARACTER}3", x_px=361, y_px=185)], + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=275, y_px=277), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=289, y_px=251), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=321, y_px=261), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=306, y_px=288), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=332, y_px=177), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=344, y_px=156), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=372, y_px=163), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=361, y_px=185)], image_resolution=IMAGE_RESOLUTION, timestamp_utc_iso8601=now_utc.isoformat())) pose_solver.update() From dc96904cbeeed13cc9589f5434ab78e01db1468f Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 15 Jul 2025 14:58:34 -0400 Subject: [PATCH 10/33] WIP: Further consolidation --- src/board_builder/board_builder.py | 10 +- .../test/accuracy/accuracy_test.py | 3 +- .../structures/accuracy_test_parameters.py | 2 +- src/board_builder/test/graph_search_test.py | 5 +- .../utils/board_builder_pose_solver.py | 4 +- src/board_builder/utils/graph_search.py | 4 +- src/common/__init__.py | 45 ++- src/common/annotator.py | 14 +- src/common/api.py | 4 +- src/common/camera.py | 14 +- src/common/exceptions.py | 3 - src/common/image_processing.py | 287 ++++++++++++++++++ src/common/intrinsic_calibrator.py | 19 +- src/common/{util/math_utils.py => math.py} | 172 ++++++++++- src/common/mct_component.py | 47 ++- .../{util/io_utils.py => serialization.py} | 191 +++++++++++- src/common/{status_messages.py => status.py} | 5 + src/common/structures/__init__.py | 32 -- src/common/structures/image.py | 149 --------- src/common/structures/linear_algebra.py | 163 ---------- src/common/structures/serialization.py | 190 ------------ src/common/structures/tracking.py | 37 --- src/common/util/__init__.py | 3 - src/common/util/image_utils.py | 142 --------- src/controller/__init__.py | 1 - src/controller/configuration.py | 2 +- src/controller/connection.py | 19 +- src/controller/exceptions.py | 5 - src/controller/mct_controller.py | 13 +- src/detector/api.py | 13 +- src/detector/detector.py | 25 +- src/detector/detector_app.py | 3 +- src/gui/panels/base_panel.py | 9 +- src/gui/panels/board_builder_panel.py | 9 +- src/gui/panels/calibrator_panel.py | 3 +- src/gui/panels/detector_panel.py | 11 +- src/gui/panels/pose_solver_panel.py | 7 +- .../panels/specialized/graphics_renderer.py | 6 +- 
src/implementations/annotator_aruco_opencv.py | 7 +- .../camera_opencv_capture_device.py | 15 +- src/implementations/camera_picamera2.py | 12 +- src/implementations/common_aruco_opencv.py | 4 +- .../intrinsic_charuco_opencv.py | 28 +- src/pose_solver/api.py | 5 +- src/pose_solver/exceptions.py | 9 - src/pose_solver/pose_solver.py | 15 +- src/pose_solver/pose_solver_api.py | 8 +- src/pose_solver/structures.py | 2 +- ...generate_target_definition_from_charuco.py | 2 +- test/test_extrinsic_calibration.py | 27 +- test/test_math_utils.py | 4 +- test/test_pose_solver.py | 2 +- 52 files changed, 897 insertions(+), 914 deletions(-) delete mode 100644 src/common/exceptions.py create mode 100644 src/common/image_processing.py rename src/common/{util/math_utils.py => math.py} (83%) rename src/common/{util/io_utils.py => serialization.py} (50%) rename src/common/{status_messages.py => status.py} (97%) delete mode 100644 src/common/structures/__init__.py delete mode 100644 src/common/structures/image.py delete mode 100644 src/common/structures/linear_algebra.py delete mode 100644 src/common/structures/serialization.py delete mode 100644 src/common/structures/tracking.py delete mode 100644 src/common/util/__init__.py delete mode 100644 src/common/util/image_utils.py delete mode 100644 src/controller/exceptions.py delete mode 100644 src/pose_solver/exceptions.py diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 5258593..11247d5 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -1,15 +1,13 @@ +from .utils import BoardBuilderPoseSolver +from .structures import PoseLocation, Marker, MarkerCorners, TargetBoard +from src.common import Pose, Annotation, Matrix4x4 +from collections import defaultdict import datetime import json import os import numpy as np - -from collections import defaultdict from typing import Final -from .utils import BoardBuilderPoseSolver -from .structures import PoseLocation, MarkerCorners -from src.common.structures import Pose, Annotation, Matrix4x4 -from .structures import Marker, TargetBoard _HOMOGENEOUS_POINT_COORD: Final[int] = 4 TESTED_BOARD_NAME: str = 'top_data.json' # If collecting data for repeatability test, specify the file name. 
cube_data.json, planar_data.json, top_data.json diff --git a/src/board_builder/test/accuracy/accuracy_test.py b/src/board_builder/test/accuracy/accuracy_test.py index c1385e9..c68ecf5 100644 --- a/src/board_builder/test/accuracy/accuracy_test.py +++ b/src/board_builder/test/accuracy/accuracy_test.py @@ -1,6 +1,5 @@ from src.board_builder.board_builder import BoardBuilder -from src.common import MathUtils -from src.common.structures import Annotation +from src.common import Annotation, MathUtils from src.board_builder.structures import \ Marker, \ TargetBoard diff --git a/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py b/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py index 0b42bec..a900fe4 100644 --- a/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py +++ b/src/board_builder/test/accuracy/structures/accuracy_test_parameters.py @@ -1,6 +1,6 @@ import numpy as np -from src.common.structures import Matrix4x4, Pose, IntrinsicParameters +from src.common import Matrix4x4, Pose, IntrinsicParameters class AccuracyTestParameters: diff --git a/src/board_builder/test/graph_search_test.py b/src/board_builder/test/graph_search_test.py index 3af6ccf..67c7245 100644 --- a/src/board_builder/test/graph_search_test.py +++ b/src/board_builder/test/graph_search_test.py @@ -1,8 +1,9 @@ -from src.board_builder.structures import PoseLocation import datetime -from src.common.structures import Matrix4x4 +from src.common import Matrix4x4 +from src.board_builder.structures import PoseLocation from src.board_builder.utils.graph_search import create_graph, bfs_shortest_path, get_transform_from_root + """ Description: Test case for the BFS/graph_search algorithm """ diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py index 1238e7b..b5b90c9 100644 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ b/src/board_builder/utils/board_builder_pose_solver.py @@ -3,10 +3,10 @@ MarkerRaySet, \ PoseData, \ PoseLocation -from src.common import MathUtils -from src.common.structures import \ +from src.common import \ IntrinsicParameters, \ IterativeClosestPointParameters, \ + MathUtils, \ Matrix4x4, \ Pose, \ Ray diff --git a/src/board_builder/utils/graph_search.py b/src/board_builder/utils/graph_search.py index a7c52aa..a2f8256 100644 --- a/src/board_builder/utils/graph_search.py +++ b/src/board_builder/utils/graph_search.py @@ -1,8 +1,8 @@ +from src.board_builder.structures import MatrixNode, PoseLocation +from src.common import Matrix4x4 from collections import deque, defaultdict from typing import Dict, List, Tuple -from src.board_builder.structures import MatrixNode, PoseLocation -from src.common.structures import Matrix4x4 def create_graph( relative_pose_matrix: List[List[PoseLocation | None]], diff --git a/src/common/__init__.py b/src/common/__init__.py index 6493d22..4e30083 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -17,17 +17,46 @@ from .camera import \ Camera, \ MCTCameraRuntimeError -from .exceptions import \ - MCTError +from .image_processing import \ + Annotation, \ + ImageFormat, \ + ImageResolution, \ + ImageUtils, \ + IntrinsicCalibration, \ + IntrinsicParameters from .intrinsic_calibrator import \ IntrinsicCalibrator, \ MCTIntrinsicCalibrationError -from .mct_component import MCTComponent -from .status_messages import \ +from .math import \ + IterativeClosestPointParameters, \ + Landmark, \ + MathUtils, \ + Matrix4x4, \ + Pose, \ + Ray, \ + 
Target +from .mct_component import \ + DetectorFrame, \ + MCTComponent, \ + PoseSolverFrame +from .serialization import \ + IOUtils, \ + KeyValueSimpleAbstract, \ + KeyValueSimpleAny, \ + KeyValueSimpleBool, \ + KeyValueSimpleString, \ + KeyValueSimpleFloat, \ + KeyValueSimpleInt, \ + KeyValueMetaAbstract, \ + KeyValueMetaAny, \ + KeyValueMetaBool, \ + KeyValueMetaEnum, \ + KeyValueMetaFloat, \ + KeyValueMetaInt, \ + MCTSerializationError, \ + MCTDeserializable +from .status import \ + MCTError, \ SeverityLabel, \ StatusMessage, \ StatusMessageSource -from .util import \ - ImageUtils, \ - IOUtils, \ - MathUtils diff --git a/src/common/annotator.py b/src/common/annotator.py index 88f9447..4d9872e 100644 --- a/src/common/annotator.py +++ b/src/common/annotator.py @@ -1,12 +1,12 @@ -from .exceptions import \ - MCTError -from .status_messages import \ - SeverityLabel, \ - StatusMessageSource -from .structures import \ - Annotation, \ +from .image_processing import \ + Annotation +from .serialization import \ KeyValueMetaAny, \ KeyValueSimpleAny +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessageSource import abc import datetime from enum import StrEnum diff --git a/src/common/api.py b/src/common/api.py index 22c6477..b7c7f20 100644 --- a/src/common/api.py +++ b/src/common/api.py @@ -1,5 +1,5 @@ -from .status_messages import StatusMessage -from .structures import MCTDeserializable +from .serialization import MCTDeserializable +from .status import StatusMessage import abc from pydantic import BaseModel, Field, SerializeAsAny from typing import Final, Literal diff --git a/src/common/camera.py b/src/common/camera.py index 953ab70..1a8c333 100644 --- a/src/common/camera.py +++ b/src/common/camera.py @@ -1,13 +1,13 @@ -from .exceptions import \ - MCTError -from .status_messages import \ - SeverityLabel, \ - StatusMessageSource -from .structures import \ +from .image_processing import \ ImageFormat, \ - ImageResolution, \ + ImageResolution +from .serialization import \ KeyValueSimpleAny, \ KeyValueMetaAbstract +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessageSource import abc import base64 import cv2 diff --git a/src/common/exceptions.py b/src/common/exceptions.py deleted file mode 100644 index 97abf2f..0000000 --- a/src/common/exceptions.py +++ /dev/null @@ -1,3 +0,0 @@ -class MCTError(Exception): - def __init__(self, *args): - super().__init__(*args) diff --git a/src/common/image_processing.py b/src/common/image_processing.py new file mode 100644 index 0000000..393ebea --- /dev/null +++ b/src/common/image_processing.py @@ -0,0 +1,287 @@ +import base64 +import cv2 +from enum import StrEnum +import logging +import math # Python's math module, not the one from this project! +import numpy +from pydantic import BaseModel, Field +from typing import ClassVar, Literal, Final + + +logger = logging.getLogger(__file__) + +ColorMode = Literal["color", "greyscale"] + + +class Annotation(BaseModel): + """ + A distinct point as detected on a detector image. + """ + + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + + UNIDENTIFIED_LABEL: ClassVar[str] = str() + + feature_label: str = Field() # Empty indicates that something was detected but not identified + x_px: float = Field() + y_px: float = Field() + + def base_feature_label(self) -> str: + """ + Part of the label before the RELATION_CHARACTER. 
+ """ + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] + + +class ImageFormat(StrEnum): + FORMAT_PNG = ".png" + FORMAT_JPG = ".jpg" + + +class ImageResolution(BaseModel): + x_px: int = Field() + y_px: int = Field() + + def __eq__(self, other) -> bool: + if type(self) is not type(other): + return False + return \ + self.x_px == other.x_px and \ + self.y_px == other.y_px + + def __hash__(self) -> int: + return hash(str(self)) + + def __lt__(self, other): + if not isinstance(other, ImageResolution): + raise ValueError() + if self.x_px < other.x_px: + return True + elif self.x_px > other.x_px: + return False + elif self.y_px < other.y_px: + return True + else: + return False + + def __str__(self): + return f"{self.x_px}x{self.y_px}" + + @staticmethod + def from_str(in_str: str) -> 'ImageResolution': + if 'x' not in in_str: + raise ValueError("in_str is expected to contain delimiter 'x'.") + parts: list[str] = in_str.split('x') + if len(parts) > 2: + raise ValueError("in_str is expected to contain exactly one 'x'.") + x_px = int(parts[0]) + y_px = int(parts[1]) + return ImageResolution(x_px=x_px, y_px=y_px) + + +class IntrinsicParameters(BaseModel): + """ + Camera intrinsic parameters (focal length, optical center, distortion coefficients). + See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html + See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 + """ + + focal_length_x_px: float = Field() + focal_length_y_px: float = Field() + optical_center_x_px: float = Field() + optical_center_y_px: float = Field() + + radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV + + tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV + + def as_array(self) -> list[float]: + return_value: list[float] = [ + self.focal_length_x_px, + self.focal_length_y_px, + self.optical_center_x_px, + self.optical_center_y_px] + return_value += self.get_distortion_coefficients() + return return_value + + def get_matrix(self) -> list[list[float]]: + """calibration matrix expected by OpenCV in some operations""" + return \ + [[self.focal_length_x_px, 0.0, self.optical_center_x_px], + [0.0, self.focal_length_y_px, self.optical_center_y_px], + [0.0, 0.0, 1.0]] + + def get_distortion_coefficients(self) -> list[float]: + """ + Distortion coefficients in array format expected by OpenCV in some operations. 
+ See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d + calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with + """ + coefficients: list[float] = [ + self.radial_distortion_coefficients[0], + self.radial_distortion_coefficients[1], + self.tangential_distortion_coefficients[0], + self.tangential_distortion_coefficients[1]] + coefficients += self.radial_distortion_coefficients[2:] + return coefficients + + @staticmethod + def generate_zero_parameters( + resolution_x_px: int, + resolution_y_px: int, + fov_x_degrees: float = 45.0, + fov_y_degrees: float = 45.0 + ) -> "IntrinsicParameters": + optical_center_x_px: int = int(round(resolution_x_px/2.0)) + fov_x_radians: float = fov_x_degrees * math.pi / 180.0 + focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) + optical_center_y_px: int = int(round(resolution_y_px/2.0)) + fov_y_radians: float = fov_y_degrees * math.pi / 180.0 + focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) + return IntrinsicParameters( + focal_length_x_px=focal_length_x_px, + focal_length_y_px=focal_length_y_px, + optical_center_x_px=optical_center_x_px, + optical_center_y_px=optical_center_y_px, + radial_distortion_coefficients=[0.0, 0.0, 0.0], + tangential_distortion_coefficients=[0.0, 0.0]) + + +class IntrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + image_resolution: ImageResolution = Field() + calibrated_values: IntrinsicParameters = Field() + supplemental_data: dict = Field() + + +class ImageUtils: + """ + A "class" to group related static functions, like in a namespace. + The class itself is not meant to be instantiated. + """ + + def __init__(self): + raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") + + @staticmethod + def base64_to_image( + input_base64: str, + color_mode: ColorMode = "color" + ) -> numpy.ndarray: + """ + Assumes 8 bits per component + """ + + image_bytes: bytes = base64.b64decode(s=input_base64) + + color_flag: int = 0 + if color_mode == "color": + color_flag |= cv2.IMREAD_COLOR + elif color_mode == "greyscale": + color_flag |= cv2.IMREAD_GRAYSCALE + else: + logger.warning(f"Unsupported color mode specified: {color_mode}") + + opencv_image: numpy.ndarray = cv2.imdecode( + buf=numpy.frombuffer( + buffer=image_bytes, + dtype=numpy.uint8), + flags=color_flag) + return opencv_image + + @staticmethod + def black_image( + resolution_px: tuple[int, int], + ) -> numpy.ndarray: + return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) + + @staticmethod + def bytes_to_base64( + image_bytes: bytes + ) -> str: + return base64.b64encode(image_bytes).decode("ascii") + + @staticmethod + def image_resize_to_fit( + opencv_image: numpy.ndarray, + available_size: tuple[int, int] # x, y + ) -> numpy.ndarray: + # note: opencv height represented by 1st dimension + source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) + image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( + source_resolution_px=source_resolution_px, + available_size_px=available_size) + return cv2.resize( + src=opencv_image, + dsize=(image_width_px, image_height_px)) + + @staticmethod + def image_to_base64( + image_data: numpy.ndarray, + image_format: ImageFormat = ".png", + ) -> str: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... 
+ :return: base64 string representing the image + """ + encoded_image_rgb_bytes: bytes = ImageUtils.image_to_bytes( + image_data=image_data, + image_format=image_format) + encoded_image_rgb_base64: str = ImageUtils.bytes_to_base64(encoded_image_rgb_bytes) + return encoded_image_rgb_base64 + + @staticmethod + def image_to_bytes( + image_data: numpy.ndarray, + image_format: ImageFormat = ".png", + ) -> bytes: + """ + :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) + :param image_format: e.g. ".jpg", ".png"... + :return: base64 string representing the image + """ + encoded_image_rgb_single_row: numpy.array + encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) + encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() + return encoded_image_rgb_bytes + + @staticmethod + def scale_factor_for_available_space_px( + source_resolution_px: tuple[int, int], + available_size_px: tuple[int, int] + ) -> tuple[int, int]: + source_width_px: int = source_resolution_px[0] + source_height_px: int = source_resolution_px[1] + available_width_px: int = available_size_px[0] + available_height_px: int = available_size_px[1] + scale: float = min( + available_width_px / float(source_width_px), + available_height_px / float(source_height_px)) + return int(round(source_width_px * scale)), int(round(source_height_px * scale)) + + class StandardResolutions: + RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) + RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) + RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) + RES_1024X768: Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) + RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) + RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) + RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=1024) + RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) + + @staticmethod + def as_list(): + return [ + ImageUtils.StandardResolutions.RES_640X360, + ImageUtils.StandardResolutions.RES_640X480, + ImageUtils.StandardResolutions.RES_800X600, + ImageUtils.StandardResolutions.RES_1024X768, + ImageUtils.StandardResolutions.RES_1280X720, + ImageUtils.StandardResolutions.RES_1280X800, + ImageUtils.StandardResolutions.RES_1280X1024, + ImageUtils.StandardResolutions.RES_1920X1080] diff --git a/src/common/intrinsic_calibrator.py b/src/common/intrinsic_calibrator.py index bfa9c45..81e345a 100644 --- a/src/common/intrinsic_calibrator.py +++ b/src/common/intrinsic_calibrator.py @@ -1,11 +1,13 @@ -from .exceptions import MCTError -from .status_messages import SeverityLabel, StatusMessageSource -from .structures import \ +from .image_processing import \ ImageResolution, \ - IntrinsicCalibration -from .util import \ ImageUtils, \ + IntrinsicCalibration +from .serialization import \ IOUtils +from .status import \ + MCTError, \ + SeverityLabel, \ + StatusMessageSource import abc import datetime from enum import StrEnum @@ -111,10 +113,7 @@ class IntrinsicCalibrator(abc.ABC): ResultMetadata: type[_ResultMetadata] = _ResultMetadata DataMap: type[_DataMap] = _DataMap - class IntrinsicCalibratorConfiguration(BaseModel): - data_path: str = Field() - - _configuration: IntrinsicCalibratorConfiguration + _configuration: Configuration _calibration_map: dict[ImageResolution, _DataMapValue] _status_message_source: StatusMessageSource @@ 
-125,7 +124,7 @@ class IntrinsicCalibratorConfiguration(BaseModel): def __init__( self, - configuration: IntrinsicCalibratorConfiguration, + configuration: Configuration, status_message_source: StatusMessageSource ): self._configuration = configuration diff --git a/src/common/util/math_utils.py b/src/common/math.py similarity index 83% rename from src/common/util/math_utils.py rename to src/common/math.py index 27b4c6f..cd91235 100644 --- a/src/common/util/math_utils.py +++ b/src/common/math.py @@ -1,18 +1,171 @@ -from ..structures import \ +from .image_processing import \ Annotation, \ - IterativeClosestPointParameters, \ - IntrinsicParameters, \ - Matrix4x4, \ - Ray, \ - Target + IntrinsicParameters import cv2 import numpy +from pydantic import BaseModel, Field from scipy.spatial.transform import Rotation -from typing import TypeVar +from typing import ClassVar, Final -XPointKey = TypeVar("XPointKey") -_DEFAULT_EPSILON: float = 0.0001 +_DEFAULT_EPSILON: Final[float] = 0.0001 + + +class IterativeClosestPointParameters(BaseModel): + # ICP will stop after this many iterations + termination_iteration_count: int = Field() + + # ICP will stop if distance *and* angle difference from one iteration to the next + # is smaller than these + termination_delta_translation: float = Field() + termination_delta_rotation_radians: float = Field() + + # ICP will stop if overall point-to-point distance (between source and target) + # mean *or* root-mean-square is less than specified + termination_mean_point_distance: float = Field() + termination_rms_point_distance: float = Field() # root-mean-square + + +class Landmark(BaseModel): + + # These can denote that multiple landmarks are related if they share the same + # "base label" (the part before the first and only occurrence of this character). + RELATION_CHARACTER: ClassVar[str] = "$" + + """ + A distinct point in 3D space. + Coordinates are in the unit of the user's choosing. + """ + feature_label: str = Field() + x: float = Field() + y: float = Field() + z: float = Field() + + def as_float_list(self) -> list[float]: + return [self.x, self.y, self.z] + + def base_feature_label(self) -> str: + """ + Part of the label before the RELATION_CHARACTER. + """ + if self.RELATION_CHARACTER not in self.feature_label: + return self.feature_label + return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] + + +class Matrix4x4(BaseModel): + + @staticmethod + def _identity_values() -> list[float]: + return \ + [1.0, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0] + values: list[float] = Field(default_factory=_identity_values) + + def as_numpy_array(self): + a = self.values + return numpy.asarray( + [[a[0], a[1], a[2], a[3]], + [a[4], a[5], a[6], a[7]], + [a[8], a[9], a[10], a[11]], + [a[12], a[13], a[14], a[15]]]) + + def __getitem__(self, idx: tuple[int, int]) -> float: + if isinstance(idx, tuple): + return self.values[(idx[0]*4) + idx[1]] + else: + raise ValueError("Unexpected index. Expected tuple index [row,col].") + + def __mul__(self, other) -> 'Matrix4x4': + if not isinstance(other, Matrix4x4): + raise ValueError + result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) + return Matrix4x4(values=list(result_numpy_array.flatten())) + + def get_translation(self) -> list[float]: + """ + Return a vector of [x,y,z] representing translation. 
+ """ + a = self.values + return [a[3], a[7], a[11]] + + def inverse(self) -> 'Matrix4x4': + inv_numpy_array = numpy.linalg.inv(self.as_numpy_array()) + return Matrix4x4.from_numpy_array(inv_numpy_array) + + @staticmethod + def from_raw_values( + v00, v01, v02, v03, + v10, v11, v12, v13, + v20, v21, v22, v23, + v30, v31, v32, v33 + ) -> 'Matrix4x4': + return Matrix4x4(values=[ + v00, v01, v02, v03, + v10, v11, v12, v13, + v20, v21, v22, v23, + v30, v31, v32, v33]) + + @staticmethod + def from_list( + value_list: list[float] + ) -> 'Matrix4x4': + if len(value_list) != 16: + raise ValueError(f"Expected a list of 16 float. Got {str(value_list)}.") + return Matrix4x4(values=list(value_list)) + + @staticmethod + def from_numpy_array( + value_array: numpy.ndarray + ) -> 'Matrix4x4': + if len(value_array) != 4: + raise ValueError(f"Expected input array to have 4 rows. Got {len(value_array)}.") + for i in range(0, len(value_array)): + if len(value_array[i]) != 4: + raise ValueError(f"Expected input row {i} to have 4 col. Got {len(value_array[i])}.") + return Matrix4x4(values=list(value_array.flatten())) + + +class Pose(BaseModel): + target_id: str = Field() + object_to_reference_matrix: Matrix4x4 = Field() + solver_timestamp_utc_iso8601: str = Field() + + +class Ray: + source_point: list[float] + direction: list[float] + + def __init__( + self, + source_point: list[float], + direction: list[float], + epsilon: float = _DEFAULT_EPSILON + ): + direction_norm = numpy.linalg.norm(direction) + if direction_norm < epsilon: + raise ValueError("Direction cannot be zero.") + self.source_point = source_point + self.direction = direction + + +class Target(BaseModel): + """ + A trackable object. + """ + label: str + landmarks: list[Landmark] + + def get_landmark_point( + self, + feature_label: str + ) -> list[float]: + for landmark in self.landmarks: + if landmark.feature_label == feature_label: + return landmark.as_float_list() + raise ValueError class MathUtils: @@ -555,4 +708,3 @@ def square_marker_corner_points( [half_width, -half_width, 0., 1.], # Bottom-right [-half_width, -half_width, 0., 1.]] # Bottom-left return corner_points - diff --git a/src/common/mct_component.py b/src/common/mct_component.py index af55770..7e9a41d 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -11,17 +11,23 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from .status_messages import \ +from .image_processing import \ + Annotation, \ + ImageResolution +from .math import \ + Pose +from .serialization import \ + MCTDeserializable, \ + MCTSerializationError +from .status import \ SeverityLabel, \ StatusMessage, \ StatusMessageSource -from .structures import \ - MCTDeserializable, \ - MCTSerializationError import abc import datetime from fastapi import WebSocket, WebSocketDisconnect import logging +from pydantic import BaseModel, Field from typing import Callable, Optional, TypeVar @@ -32,6 +38,39 @@ T = TypeVar("T") +class DetectorFrame(BaseModel): + annotations: list[Annotation] = Field(default_factory=list) + timestamp_utc_iso8601: str = Field() + image_resolution: ImageResolution = Field() + + @property + def annotations_identified(self): + return [ + annotation + for annotation in self.annotations + if annotation.feature_label != Annotation.UNIDENTIFIED_LABEL] + + @property + def annotations_unidentified(self): + return [ + annotation + for annotation in self.annotations + if annotation.feature_label == Annotation.UNIDENTIFIED_LABEL] + + @property + def timestamp_utc(self): 
+ return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + +class PoseSolverFrame(BaseModel): + detector_poses: list[Pose] | None = Field() + target_poses: list[Pose] | None = Field() + timestamp_utc_iso8601: str = Field() + + def timestamp_utc(self): + return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) + + class MCTComponent(abc.ABC): _status_message_source: StatusMessageSource diff --git a/src/common/util/io_utils.py b/src/common/serialization.py similarity index 50% rename from src/common/util/io_utils.py rename to src/common/serialization.py index 9ab41d1..362299b 100644 --- a/src/common/util/io_utils.py +++ b/src/common/serialization.py @@ -1,7 +1,196 @@ +from .status import MCTError +import abc import hjson import json import os -from typing import Any, Callable, Literal +from pydantic import BaseModel, Field, ValidationError +from typing import Any, Final, Callable, Literal, TypeVar, Union + + +class KeyValueSimpleAbstract(BaseModel, abc.ABC): + """ + Abstract class to represent a key-value pair. + Intended use: Setting parameter over network, serialization through JSON. + """ + parsable_type: str + key: str = Field() + + +class KeyValueSimpleBool(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "bool" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: bool = Field() + + +class KeyValueSimpleFloat(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "float" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: float = Field() + + +class KeyValueSimpleInt(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "int" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: int = Field() + + +class KeyValueSimpleString(KeyValueSimpleAbstract): + _TYPE_IDENTIFIER: Final[str] = "str" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: str = Field() + + +class KeyValueMetaAbstract(BaseModel, abc.ABC): + """ + Abstract class to represent a key-value pair, and additional data about the datum (range, description, etc) + Intended use: Getting parameter over network, serialization through JSON. + """ + parsable_type: str + key: str = Field() + + @abc.abstractmethod + def to_simple(self) -> KeyValueSimpleAbstract: ... 
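# Illustrative sketch, not part of the patch's added lines: the intended round trip for the
# key-value classes in this module (the concrete KeyValueMeta* subclasses are defined just
# below). The serialization helper is assumed to be pydantic's standard one (model_dump() in
# pydantic v2, dict() in v1), and the key name "exposure_ms" is hypothetical.
#
#     meta = KeyValueMetaFloat(key="exposure_ms", value=8.0, range_minimum=0.1, range_maximum=66.0)
#     simple = meta.to_simple()                  # -> KeyValueSimpleFloat(key="exposure_ms", value=8.0)
#     payload = simple.model_dump()              # {"parsable_type": "float", "key": "exposure_ms", "value": 8.0}
#     restored = KeyValueSimpleFloat(**payload)  # pydantic re-validates the wire payload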
+ + +class KeyValueMetaBool(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "bool" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: bool = Field() + + def to_simple(self) -> KeyValueSimpleBool: + return KeyValueSimpleBool(key=self.key, value=self.value) + + +class KeyValueMetaEnum(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "enum" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: str = Field() + allowable_values: list[str] = Field(default_factory=list) + + def to_simple(self) -> KeyValueSimpleString: + return KeyValueSimpleString(key=self.key, value=self.value) + + +class KeyValueMetaFloat(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "float" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: float = Field() + range_minimum: float = Field() + range_maximum: float = Field() + range_step: float = Field(default=1.0) + digit_count: int = Field(default=2) + + def to_simple(self) -> KeyValueSimpleFloat: + return KeyValueSimpleFloat(key=self.key, value=self.value) + + +class KeyValueMetaInt(KeyValueMetaAbstract): + _TYPE_IDENTIFIER: Final[str] = "int" + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + value: int = Field() + range_minimum: int = Field() + range_maximum: int = Field() + range_step: int = Field(default=1) + + def to_simple(self) -> KeyValueSimpleInt: + return KeyValueSimpleInt(key=self.key, value=self.value) + + +# pydantic doesn't appear to handle very well typing's (TypeA, TypeB, ...) notation of a union +KeyValueSimpleAny = Union[ + KeyValueSimpleBool, + KeyValueSimpleString, + KeyValueSimpleFloat, + KeyValueSimpleInt] +KeyValueMetaAny = Union[ + KeyValueMetaBool, + KeyValueMetaEnum, + KeyValueMetaFloat, + KeyValueMetaInt] + + +DeserializableT = TypeVar('DeserializableT', bound='MCTParsable') + + +class MCTSerializationError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + +class MCTDeserializable(abc.ABC): + + @staticmethod + @abc.abstractmethod + def type_identifier() -> str: + pass + + @staticmethod + def deserialize_series_list( + series_dict: dict, + supported_types: list[type[DeserializableT]] + ) -> list[DeserializableT]: + if "series" not in series_dict or not isinstance(series_dict["series"], list): + message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." + raise MCTSerializationError(message) + + output_series: list[DeserializableT] = list() + for parsable_dict in series_dict["series"]: + if not isinstance(parsable_dict, dict): + message: str = "series contained a non-dict element. Input is improperly formatted." + raise MCTSerializationError(message) + output_series.append(MCTDeserializable.deserialize_single( + single_dict=parsable_dict, + supported_types=supported_types)) + + return output_series + + @staticmethod + def deserialize_single( + single_dict: dict, + supported_types: list[type[DeserializableT]] + ) -> DeserializableT: + if "parsable_type" not in single_dict or not isinstance(single_dict["parsable_type"], str): + message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." 
+ raise MCTSerializationError(message) from None + + for supported_type in supported_types: + if single_dict["parsable_type"] == supported_type.parsable_type_identifier(): + request: DeserializableT + try: + request = supported_type(**single_dict) + except ValidationError as e: + raise MCTSerializationError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None + return request + + message: str = "parsable_type did not match any expected value. Input is improperly formatted." + raise MCTSerializationError(message) class IOUtils: diff --git a/src/common/status_messages.py b/src/common/status.py similarity index 97% rename from src/common/status_messages.py rename to src/common/status.py index 1350c6c..fe34e1b 100644 --- a/src/common/status_messages.py +++ b/src/common/status.py @@ -8,6 +8,11 @@ logger = logging.getLogger(__name__) +class MCTError(Exception): + def __init__(self, *args): + super().__init__(*args) + + class SeverityLabel(StrEnum): DEBUG = "debug" INFO = "info" diff --git a/src/common/structures/__init__.py b/src/common/structures/__init__.py deleted file mode 100644 index ef9b30c..0000000 --- a/src/common/structures/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -from .image import \ - Annotation, \ - ImageFormat, \ - ImageResolution, \ - IntrinsicCalibration, \ - IntrinsicParameters -from .linear_algebra import \ - IterativeClosestPointParameters, \ - Landmark, \ - Matrix4x4, \ - Pose, \ - Ray, \ - Target -from .serialization import \ - KeyValueSimpleAbstract, \ - KeyValueSimpleAny, \ - KeyValueSimpleBool, \ - KeyValueSimpleString, \ - KeyValueSimpleFloat, \ - KeyValueSimpleInt, \ - KeyValueMetaAbstract, \ - KeyValueMetaAny, \ - KeyValueMetaBool, \ - KeyValueMetaEnum, \ - KeyValueMetaFloat, \ - KeyValueMetaInt, \ - MCTSerializationError, \ - MCTDeserializable -from .tracking import \ - Annotation, \ - DetectorFrame, \ - PoseSolverFrame diff --git a/src/common/structures/image.py b/src/common/structures/image.py deleted file mode 100644 index b4b33e5..0000000 --- a/src/common/structures/image.py +++ /dev/null @@ -1,149 +0,0 @@ -from enum import StrEnum -import math -from pydantic import BaseModel, Field -from typing import ClassVar - - -class Annotation(BaseModel): - """ - A distinct point as detected on a detector image. - """ - - # These can denote that multiple landmarks are related if they share the same - # "base label" (the part before the first and only occurrence of this character). - RELATION_CHARACTER: ClassVar[str] = "$" - - UNIDENTIFIED_LABEL: ClassVar[str] = str() - - feature_label: str = Field() # Empty indicates that something was detected but not identified - x_px: float = Field() - y_px: float = Field() - - def base_feature_label(self) -> str: - """ - Part of the label before the RELATION_CHARACTER. 
- """ - if self.RELATION_CHARACTER not in self.feature_label: - return self.feature_label - return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] - - -class ImageFormat(StrEnum): - FORMAT_PNG = ".png" - FORMAT_JPG = ".jpg" - - -class ImageResolution(BaseModel): - x_px: int = Field() - y_px: int = Field() - - def __eq__(self, other) -> bool: - if type(self) is not type(other): - return False - return \ - self.x_px == other.x_px and \ - self.y_px == other.y_px - - def __hash__(self) -> int: - return hash(str(self)) - - def __lt__(self, other): - if not isinstance(other, ImageResolution): - raise ValueError() - if self.x_px < other.x_px: - return True - elif self.x_px > other.x_px: - return False - elif self.y_px < other.y_px: - return True - else: - return False - - def __str__(self): - return f"{self.x_px}x{self.y_px}" - - @staticmethod - def from_str(in_str: str) -> 'ImageResolution': - if 'x' not in in_str: - raise ValueError("in_str is expected to contain delimiter 'x'.") - parts: list[str] = in_str.split('x') - if len(parts) > 2: - raise ValueError("in_str is expected to contain exactly one 'x'.") - x_px = int(parts[0]) - y_px = int(parts[1]) - return ImageResolution(x_px=x_px, y_px=y_px) - - -class IntrinsicParameters(BaseModel): - """ - Camera intrinsic parameters (focal length, optical center, distortion coefficients). - See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html - See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 - """ - - focal_length_x_px: float = Field() - focal_length_y_px: float = Field() - optical_center_x_px: float = Field() - optical_center_y_px: float = Field() - - radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV - - tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV - - def as_array(self) -> list[float]: - return_value: list[float] = [ - self.focal_length_x_px, - self.focal_length_y_px, - self.optical_center_x_px, - self.optical_center_y_px] - return_value += self.get_distortion_coefficients() - return return_value - - def get_matrix(self) -> list[list[float]]: - """calibration matrix expected by OpenCV in some operations""" - return \ - [[self.focal_length_x_px, 0.0, self.optical_center_x_px], - [0.0, self.focal_length_y_px, self.optical_center_y_px], - [0.0, 0.0, 1.0]] - - def get_distortion_coefficients(self) -> list[float]: - """ - Distortion coefficients in array format expected by OpenCV in some operations. 
- See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d - calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with - """ - coefficients: list[float] = [ - self.radial_distortion_coefficients[0], - self.radial_distortion_coefficients[1], - self.tangential_distortion_coefficients[0], - self.tangential_distortion_coefficients[1]] - coefficients += self.radial_distortion_coefficients[2:] - return coefficients - - @staticmethod - def generate_zero_parameters( - resolution_x_px: int, - resolution_y_px: int, - fov_x_degrees: float = 45.0, - fov_y_degrees: float = 45.0 - ) -> "IntrinsicParameters": - optical_center_x_px: int = int(round(resolution_x_px/2.0)) - fov_x_radians: float = fov_x_degrees * math.pi / 180.0 - focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) - optical_center_y_px: int = int(round(resolution_y_px/2.0)) - fov_y_radians: float = fov_y_degrees * math.pi / 180.0 - focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) - return IntrinsicParameters( - focal_length_x_px=focal_length_x_px, - focal_length_y_px=focal_length_y_px, - optical_center_x_px=optical_center_x_px, - optical_center_y_px=optical_center_y_px, - radial_distortion_coefficients=[0.0, 0.0, 0.0], - tangential_distortion_coefficients=[0.0, 0.0]) - - -class IntrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - image_resolution: ImageResolution = Field() - calibrated_values: IntrinsicParameters = Field() - supplemental_data: dict = Field() diff --git a/src/common/structures/linear_algebra.py b/src/common/structures/linear_algebra.py deleted file mode 100644 index 316cafc..0000000 --- a/src/common/structures/linear_algebra.py +++ /dev/null @@ -1,163 +0,0 @@ -import numpy -from pydantic import BaseModel, Field -from typing import ClassVar, Final - - -_DEFAULT_EPSILON: Final[float] = 0.0001 - - -class IterativeClosestPointParameters(BaseModel): - # ICP will stop after this many iterations - termination_iteration_count: int = Field() - - # ICP will stop if distance *and* angle difference from one iteration to the next - # is smaller than these - termination_delta_translation: float = Field() - termination_delta_rotation_radians: float = Field() - - # ICP will stop if overall point-to-point distance (between source and target) - # mean *or* root-mean-square is less than specified - termination_mean_point_distance: float = Field() - termination_rms_point_distance: float = Field() # root-mean-square - - -class Landmark(BaseModel): - - # These can denote that multiple landmarks are related if they share the same - # "base label" (the part before the first and only occurrence of this character). - RELATION_CHARACTER: ClassVar[str] = "$" - - """ - A distinct point in 3D space. - Coordinates are in the unit of the user's choosing. - """ - feature_label: str = Field() - x: float = Field() - y: float = Field() - z: float = Field() - - def as_float_list(self) -> list[float]: - return [self.x, self.y, self.z] - - def base_feature_label(self) -> str: - """ - Part of the label before the RELATION_CHARACTER. 
- """ - if self.RELATION_CHARACTER not in self.feature_label: - return self.feature_label - return self.feature_label[0:self.feature_label.index(self.RELATION_CHARACTER)] - - -class Matrix4x4(BaseModel): - - @staticmethod - def _identity_values() -> list[float]: - return \ - [1.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0] - values: list[float] = Field(default_factory=_identity_values) - - def as_numpy_array(self): - a = self.values - return numpy.asarray( - [[a[0], a[1], a[2], a[3]], - [a[4], a[5], a[6], a[7]], - [a[8], a[9], a[10], a[11]], - [a[12], a[13], a[14], a[15]]]) - - def __getitem__(self, idx: tuple[int, int]) -> float: - if isinstance(idx, tuple): - return self.values[(idx[0]*4) + idx[1]] - else: - raise ValueError("Unexpected index. Expected tuple index [row,col].") - - def __mul__(self, other) -> 'Matrix4x4': - if not isinstance(other, Matrix4x4): - raise ValueError - result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) - return Matrix4x4(values=list(result_numpy_array.flatten())) - - def get_translation(self) -> list[float]: - """ - Return a vector of [x,y,z] representing translation. - """ - a = self.values - return [a[3], a[7], a[11]] - - def inverse(self) -> 'Matrix4x4': - inv_numpy_array = numpy.linalg.inv(self.as_numpy_array()) - return Matrix4x4.from_numpy_array(inv_numpy_array) - - @staticmethod - def from_raw_values( - v00, v01, v02, v03, - v10, v11, v12, v13, - v20, v21, v22, v23, - v30, v31, v32, v33 - ) -> 'Matrix4x4': - return Matrix4x4(values=[ - v00, v01, v02, v03, - v10, v11, v12, v13, - v20, v21, v22, v23, - v30, v31, v32, v33]) - - @staticmethod - def from_list( - value_list: list[float] - ) -> 'Matrix4x4': - if len(value_list) != 16: - raise ValueError(f"Expected a list of 16 float. Got {str(value_list)}.") - return Matrix4x4(values=list(value_list)) - - @staticmethod - def from_numpy_array( - value_array: numpy.ndarray - ) -> 'Matrix4x4': - if len(value_array) != 4: - raise ValueError(f"Expected input array to have 4 rows. Got {len(value_array)}.") - for i in range(0, len(value_array)): - if len(value_array[i]) != 4: - raise ValueError(f"Expected input row {i} to have 4 col. Got {len(value_array[i])}.") - return Matrix4x4(values=list(value_array.flatten())) - - -class Pose(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - solver_timestamp_utc_iso8601: str = Field() - - -class Ray: - source_point: list[float] - direction: list[float] - - def __init__( - self, - source_point: list[float], - direction: list[float], - epsilon: float = _DEFAULT_EPSILON - ): - direction_norm = numpy.linalg.norm(direction) - if direction_norm < epsilon: - raise ValueError("Direction cannot be zero.") - self.source_point = source_point - self.direction = direction - - -class Target(BaseModel): - """ - A trackable object. 
- """ - label: str - landmarks: list[Landmark] - - def get_landmark_point( - self, - feature_label: str - ) -> list[float]: - for landmark in self.landmarks: - if landmark.feature_label == feature_label: - return landmark.as_float_list() - raise ValueError diff --git a/src/common/structures/serialization.py b/src/common/structures/serialization.py deleted file mode 100644 index 01a7415..0000000 --- a/src/common/structures/serialization.py +++ /dev/null @@ -1,190 +0,0 @@ -from src.common.exceptions import MCTError -import abc -from pydantic import BaseModel, Field, ValidationError -from typing import Final, Literal, TypeVar, Union - - -class KeyValueSimpleAbstract(BaseModel, abc.ABC): - """ - Abstract class to represent a key-value pair. - Intended use: Setting parameter over network, serialization through JSON. - """ - parsable_type: str - key: str = Field() - - -class KeyValueSimpleBool(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "bool" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: bool = Field() - - -class KeyValueSimpleFloat(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "float" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: float = Field() - - -class KeyValueSimpleInt(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "int" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: int = Field() - - -class KeyValueSimpleString(KeyValueSimpleAbstract): - _TYPE_IDENTIFIER: Final[str] = "str" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: str = Field() - - -class KeyValueMetaAbstract(BaseModel, abc.ABC): - """ - Abstract class to represent a key-value pair, and additional data about the datum (range, description, etc) - Intended use: Getting parameter over network, serialization through JSON. - """ - parsable_type: str - key: str = Field() - - @abc.abstractmethod - def to_simple(self) -> KeyValueSimpleAbstract: ... 
- - -class KeyValueMetaBool(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "bool" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: bool = Field() - - def to_simple(self) -> KeyValueSimpleBool: - return KeyValueSimpleBool(key=self.key, value=self.value) - - -class KeyValueMetaEnum(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "enum" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: str = Field() - allowable_values: list[str] = Field(default_factory=list) - - def to_simple(self) -> KeyValueSimpleString: - return KeyValueSimpleString(key=self.key, value=self.value) - - -class KeyValueMetaFloat(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "float" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: float = Field() - range_minimum: float = Field() - range_maximum: float = Field() - range_step: float = Field(default=1.0) - digit_count: int = Field(default=2) - - def to_simple(self) -> KeyValueSimpleFloat: - return KeyValueSimpleFloat(key=self.key, value=self.value) - - -class KeyValueMetaInt(KeyValueMetaAbstract): - _TYPE_IDENTIFIER: Final[str] = "int" - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - value: int = Field() - range_minimum: int = Field() - range_maximum: int = Field() - range_step: int = Field(default=1) - - def to_simple(self) -> KeyValueSimpleInt: - return KeyValueSimpleInt(key=self.key, value=self.value) - - -# pydantic doesn't appear to handle very well typing's (TypeA, TypeB, ...) notation of a union -KeyValueSimpleAny = Union[ - KeyValueSimpleBool, - KeyValueSimpleString, - KeyValueSimpleFloat, - KeyValueSimpleInt] -KeyValueMetaAny = Union[ - KeyValueMetaBool, - KeyValueMetaEnum, - KeyValueMetaFloat, - KeyValueMetaInt] - - -DeserializableT = TypeVar('DeserializableT', bound='MCTParsable') - - -class MCTSerializationError(MCTError): - message: str - - def __init__(self, message: str, *args): - super().__init__(args) - self.message = message - - -class MCTDeserializable(abc.ABC): - - @staticmethod - @abc.abstractmethod - def type_identifier() -> str: - pass - - @staticmethod - def deserialize_series_list( - series_dict: dict, - supported_types: list[type[DeserializableT]] - ) -> list[DeserializableT]: - if "series" not in series_dict or not isinstance(series_dict["series"], list): - message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." - raise MCTSerializationError(message) - - output_series: list[DeserializableT] = list() - for parsable_dict in series_dict["series"]: - if not isinstance(parsable_dict, dict): - message: str = "series contained a non-dict element. Input is improperly formatted." - raise MCTSerializationError(message) - output_series.append(MCTDeserializable.deserialize_single( - single_dict=parsable_dict, - supported_types=supported_types)) - - return output_series - - @staticmethod - def deserialize_single( - single_dict: dict, - supported_types: list[type[DeserializableT]] - ) -> DeserializableT: - if "parsable_type" not in single_dict or not isinstance(single_dict["parsable_type"], str): - message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." 
- raise MCTSerializationError(message) from None - - for supported_type in supported_types: - if single_dict["parsable_type"] == supported_type.parsable_type_identifier(): - request: DeserializableT - try: - request = supported_type(**single_dict) - except ValidationError as e: - raise MCTSerializationError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None - return request - - message: str = "parsable_type did not match any expected value. Input is improperly formatted." - raise MCTSerializationError(message) diff --git a/src/common/structures/tracking.py b/src/common/structures/tracking.py deleted file mode 100644 index eab389f..0000000 --- a/src/common/structures/tracking.py +++ /dev/null @@ -1,37 +0,0 @@ -from .image import Annotation, ImageResolution -from .linear_algebra import Pose -import datetime -from pydantic import BaseModel, Field - - -class DetectorFrame(BaseModel): - annotations: list[Annotation] = Field(default_factory=list) - timestamp_utc_iso8601: str = Field() - image_resolution: ImageResolution = Field() - - @property - def annotations_identified(self): - return [ - annotation - for annotation in self.annotations - if annotation.feature_label != Annotation.UNIDENTIFIED_LABEL] - - @property - def annotations_unidentified(self): - return [ - annotation - for annotation in self.annotations - if annotation.feature_label == Annotation.UNIDENTIFIED_LABEL] - - @property - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) - - -class PoseSolverFrame(BaseModel): - detector_poses: list[Pose] | None = Field() - target_poses: list[Pose] | None = Field() - timestamp_utc_iso8601: str = Field() - - def timestamp_utc(self): - return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) diff --git a/src/common/util/__init__.py b/src/common/util/__init__.py deleted file mode 100644 index d91b4e7..0000000 --- a/src/common/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .image_utils import ImageUtils -from .io_utils import IOUtils -from .math_utils import MathUtils diff --git a/src/common/util/image_utils.py b/src/common/util/image_utils.py deleted file mode 100644 index ed13c88..0000000 --- a/src/common/util/image_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -from src.common.structures import \ - ImageFormat, \ - ImageResolution -import base64 -import cv2 -import logging -import numpy -from typing import Literal, Final - - -logger = logging.getLogger(__file__) - -ColorMode = Literal["color", "greyscale"] - - -class ImageUtils: - """ - A "class" to group related static functions, like in a namespace. - The class itself is not meant to be instantiated. 
- """ - - def __init__(self): - raise RuntimeError(f"{__class__.__name__} is not meant to be instantiated.") - - @staticmethod - def base64_to_image( - input_base64: str, - color_mode: ColorMode = "color" - ) -> numpy.ndarray: - """ - Assumes 8 bits per component - """ - - image_bytes: bytes = base64.b64decode(s=input_base64) - - color_flag: int = 0 - if color_mode == "color": - color_flag |= cv2.IMREAD_COLOR - elif color_mode == "greyscale": - color_flag |= cv2.IMREAD_GRAYSCALE - else: - logger.warning(f"Unsupported color mode specified: {color_mode}") - - opencv_image: numpy.ndarray = cv2.imdecode( - buf=numpy.frombuffer( - buffer=image_bytes, - dtype=numpy.uint8), - flags=color_flag) - return opencv_image - - @staticmethod - def black_image( - resolution_px: tuple[int, int], - ) -> numpy.ndarray: - return numpy.zeros((resolution_px[1], resolution_px[0], 3), dtype=numpy.uint8) - - @staticmethod - def bytes_to_base64( - image_bytes: bytes - ) -> str: - return base64.b64encode(image_bytes).decode("ascii") - - @staticmethod - def image_resize_to_fit( - opencv_image: numpy.ndarray, - available_size: tuple[int, int] # x, y - ) -> numpy.ndarray: - # note: opencv height represented by 1st dimension - source_resolution_px: tuple[int, int] = (opencv_image.shape[1], opencv_image.shape[0]) - image_width_px, image_height_px = ImageUtils.scale_factor_for_available_space_px( - source_resolution_px=source_resolution_px, - available_size_px=available_size) - return cv2.resize( - src=opencv_image, - dsize=(image_width_px, image_height_px)) - - @staticmethod - def image_to_base64( - image_data: numpy.ndarray, - image_format: ImageFormat = ".png", - ) -> str: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... - :return: base64 string representing the image - """ - encoded_image_rgb_bytes: bytes = ImageUtils.image_to_bytes( - image_data=image_data, - image_format=image_format) - encoded_image_rgb_base64: str = ImageUtils.bytes_to_base64(encoded_image_rgb_bytes) - return encoded_image_rgb_base64 - - @staticmethod - def image_to_bytes( - image_data: numpy.ndarray, - image_format: ImageFormat = ".png", - ) -> bytes: - """ - :param image_data: Expected to be an OpenCV image *or* a numpy.ndarray (theoretically - to be confirmed) - :param image_format: e.g. ".jpg", ".png"... 
- :return: base64 string representing the image - """ - encoded_image_rgb_single_row: numpy.array - encoded, encoded_image_rgb_single_row = cv2.imencode(image_format, image_data) - encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() - return encoded_image_rgb_bytes - - @staticmethod - def scale_factor_for_available_space_px( - source_resolution_px: tuple[int, int], - available_size_px: tuple[int, int] - ) -> tuple[int, int]: - source_width_px: int = source_resolution_px[0] - source_height_px: int = source_resolution_px[1] - available_width_px: int = available_size_px[0] - available_height_px: int = available_size_px[1] - scale: float = min( - available_width_px / float(source_width_px), - available_height_px / float(source_height_px)) - return int(round(source_width_px * scale)), int(round(source_height_px * scale)) - - class StandardResolutions: - RES_640X360: Final[ImageResolution] = ImageResolution(x_px=640, y_px=360) - RES_640X480: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) - RES_800X600: Final[ImageResolution] = ImageResolution(x_px=800, y_px=600) - RES_1024X768: Final[ImageResolution] = ImageResolution(x_px=1024, y_px=768) - RES_1280X720: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=720) - RES_1280X800: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=800) - RES_1280X1024: Final[ImageResolution] = ImageResolution(x_px=1280, y_px=1024) - RES_1920X1080: Final[ImageResolution] = ImageResolution(x_px=1920, y_px=1080) - - @staticmethod - def as_list(): - return [ - ImageUtils.StandardResolutions.RES_640X360, - ImageUtils.StandardResolutions.RES_640X480, - ImageUtils.StandardResolutions.RES_800X600, - ImageUtils.StandardResolutions.RES_1024X768, - ImageUtils.StandardResolutions.RES_1280X720, - ImageUtils.StandardResolutions.RES_1280X800, - ImageUtils.StandardResolutions.RES_1280X1024, - ImageUtils.StandardResolutions.RES_1920X1080] diff --git a/src/controller/__init__.py b/src/controller/__init__.py index 1e10a09..f809d53 100644 --- a/src/controller/__init__.py +++ b/src/controller/__init__.py @@ -6,5 +6,4 @@ MCTComponentConfig, \ MCTConfiguration, \ StartupMode -from .exceptions import ResponseSeriesNotExpected from .mct_controller import MCTController diff --git a/src/controller/configuration.py b/src/controller/configuration.py index b584467..4c5bbbd 100644 --- a/src/controller/configuration.py +++ b/src/controller/configuration.py @@ -1,4 +1,4 @@ -from src.common.structures import \ +from src.common import \ KeyValueSimpleAny, \ Matrix4x4, \ Target diff --git a/src/controller/connection.py b/src/controller/connection.py index 77cb17f..696e8f7 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -1,24 +1,23 @@ from src.common import \ DequeueStatusMessagesResponse, \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ - MCTRequest, \ - MCTRequestSeries, \ - MCTResponse, \ - MCTResponseSeries, \ - SeverityLabel, \ - StatusMessage, \ - TimestampGetResponse -from src.common.structures import \ - DetectorFrame, \ ImageResolution, \ IntrinsicParameters, \ KeyValueSimpleAny, \ Matrix4x4, \ MCTDeserializable, \ + MCTRequest, \ + MCTRequestSeries, \ + MCTResponse, \ + MCTResponseSeries, \ Pose, \ PoseSolverFrame, \ - Target + SeverityLabel, \ + StatusMessage, \ + Target, \ + TimestampGetResponse from src.detector.api import \ CalibrationCalculateResponse, \ CalibrationImageAddResponse, \ diff --git a/src/controller/exceptions.py b/src/controller/exceptions.py deleted file mode 100644 index d2f3abc..0000000 --- 
a/src/controller/exceptions.py +++ /dev/null @@ -1,5 +0,0 @@ -from src.common.exceptions import MCTError - - -class ResponseSeriesNotExpected(MCTError): - pass diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 4967de7..eae1017 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -1,4 +1,3 @@ -from .exceptions import ResponseSeriesNotExpected from .configuration import \ MCTComponentConfig, \ MCTConfiguration, \ @@ -8,23 +7,23 @@ DetectorConnection, \ PoseSolverConnection from src.common import \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ + IntrinsicParameters, \ MCTComponent, \ + MCTError, \ MCTRequest, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ + PoseSolverFrame, \ SeverityLabel, \ StatusMessageSource, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest -from src.common.structures import \ - DetectorFrame, \ - IntrinsicParameters, \ - PoseSolverFrame from src.detector import \ CalibrationResultGetActiveRequest, \ CalibrationResultGetActiveResponse, \ @@ -63,6 +62,10 @@ _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: Final[int] = 5 +class ResponseSeriesNotExpected(MCTError): + pass + + class MCTController(MCTComponent): class Status(StrEnum): diff --git a/src/detector/api.py b/src/detector/api.py index 4c35485..9ba82db 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -1,14 +1,13 @@ from src.common import \ - IntrinsicCalibrator, \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ - ImageFormat, \ DetectorFrame, \ - IntrinsicCalibration, \ + ImageFormat, \ ImageResolution, \ + IntrinsicCalibration, \ + IntrinsicCalibrator, \ KeyValueMetaAny, \ - KeyValueSimpleAny + KeyValueSimpleAny, \ + MCTRequest, \ + MCTResponse from pydantic import Field, SerializeAsAny from typing import Final, Literal diff --git a/src/detector/detector.py b/src/detector/detector.py index de81b2e..c207b36 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -36,20 +36,21 @@ from src.common import \ Annotator, \ Camera, \ + DetectorFrame, \ EmptyResponse, \ ErrorResponse, \ + ImageFormat, \ + ImageResolution, \ + IntrinsicCalibration, \ IntrinsicCalibrator, \ + KeyValueMetaAbstract, \ MCTIntrinsicCalibrationError, \ MCTCameraRuntimeError, \ MCTComponent, \ MCTAnnotatorRuntimeError, \ MCTRequest, \ - MCTResponse -from src.common.structures import \ - DetectorFrame, \ - ImageResolution, \ - IntrinsicCalibration, \ - KeyValueMetaAbstract + MCTResponse, \ + SeverityLabel import logging from typing import Callable, Final from pydantic import BaseModel, Field @@ -130,7 +131,9 @@ def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | ErrorResponse: try: - image_base64: str = self._camera.get_encoded_image(image_format=".png", requested_resolution=None) + image_base64: str = self._camera.get_encoded_image( + image_format=ImageFormat.FORMAT_PNG, + requested_resolution=None) image_identifier: str = self._calibrator.add_image(image_base64=image_base64) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) @@ -364,13 +367,17 @@ async def update(self): try: self._camera.update() except MCTCameraRuntimeError as e: - self.add_status_message(severity="error", message=e.message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=e.message) if self._annotator.get_status() == Annotator.Status.RUNNING and \ 
self._camera.get_changed_timestamp() > self._annotator.get_changed_timestamp(): try: self._annotator.update(self._camera.get_image()) except MCTAnnotatorRuntimeError as e: - self.add_status_message(severity="error", message=e.message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=e.message) self._frame_count += 1 if self._frame_count % 1000 == 0: print(f"Update count: {self._frame_count}") diff --git a/src/detector/detector_app.py b/src/detector/detector_app.py index 84ca601..d9f27e7 100644 --- a/src/detector/detector_app.py +++ b/src/detector/detector_app.py @@ -16,6 +16,7 @@ Annotator, \ EmptyResponse, \ ErrorResponse, \ + ImageFormat, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ @@ -119,7 +120,7 @@ async def calibration_get_result_active() -> CalibrationResultGetActiveResponse: @detector_app.get("/camera/get_image") async def camera_get_image() -> CameraImageGetResponse: result: CameraImageGetResponse = detector.camera_image_get( - request=CameraImageGetRequest(format=".png")) + request=CameraImageGetRequest(format=ImageFormat.FORMAT_PNG)) image_bytes = base64.b64decode(result.image_base64) with open("test.png", "wb") as image_file: image_file.write(image_bytes) diff --git a/src/gui/panels/base_panel.py b/src/gui/panels/base_panel.py index 74c7e79..8727b3b 100644 --- a/src/gui/panels/base_panel.py +++ b/src/gui/panels/base_panel.py @@ -7,10 +7,6 @@ ParameterText from src.common import \ ErrorResponse, \ - MCTResponse, \ - SeverityLabel, \ - StatusMessageSource -from src.common.structures import \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ KeyValueSimpleBool, \ @@ -22,7 +18,10 @@ KeyValueMetaBool, \ KeyValueMetaEnum, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTResponse, \ + SeverityLabel, \ + StatusMessageSource from typing import Final import wx diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index fc168e4..234cedd 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -14,15 +14,14 @@ MCTResponse, \ MCTResponseSeries from src.common import \ - ImageUtils, \ - StatusMessageSource -from src.common.structures import \ + Annotation, \ DetectorFrame, \ ImageResolution, \ - Annotation, \ + ImageUtils, \ Matrix4x4, \ + Pose, \ PoseSolverFrame, \ - Pose + StatusMessageSource from src.controller import MCTController from src.detector.api import \ CameraImageGetRequest, \ diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index a35517e..c4e986b 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -11,14 +11,13 @@ from src.common import \ ErrorResponse, \ EmptyResponse, \ + ImageResolution, \ ImageUtils, \ IntrinsicCalibrator, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ StatusMessageSource -from src.common.structures import \ - ImageResolution from src.controller import \ MCTController from src.detector import \ diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index e2bd900..1e2384c 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -8,19 +8,18 @@ ParameterSpinboxFloat, \ ParameterSelector from src.common import \ + Annotation, \ + DetectorFrame, \ ErrorResponse, \ EmptyResponse, \ + ImageFormat, \ + ImageResolution, \ ImageUtils, \ + KeyValueSimpleAny, \ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ StatusMessageSource -from src.common.structures import \ - DetectorFrame, 
\ - ImageFormat, \ - ImageResolution, \ - KeyValueSimpleAny, \ - Annotation from src.controller import \ MCTController from src.detector.api import \ diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index eae3022..0e25508 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -7,15 +7,14 @@ TrackingTable, \ TrackingTableRow from src.common import \ + DetectorFrame, \ ErrorResponse, \ EmptyResponse, \ + Matrix4x4, \ MCTResponse, \ MCTResponseSeries, \ + StatusMessageSource, \ SeverityLabel, \ - StatusMessageSource -from src.common.structures import \ - DetectorFrame, \ - Matrix4x4, \ Pose, \ PoseSolverFrame from src.controller import \ diff --git a/src/gui/panels/specialized/graphics_renderer.py b/src/gui/panels/specialized/graphics_renderer.py index f5a126b..6c7c8ec 100644 --- a/src/gui/panels/specialized/graphics_renderer.py +++ b/src/gui/panels/specialized/graphics_renderer.py @@ -1,4 +1,4 @@ -from src.common.structures import Matrix4x4 +from src.common import Matrix4x4 from src.gui.graphics import Constants, FileIO, Material, Model, Shader import datetime import hjson @@ -208,8 +208,8 @@ def load_models_into_context_from_data_path(self) -> dict[str, Model]: geometry_filename = f"{model_part_io.geometry_label}.stl" geometry_filepath = os.path.join(geometry_path, geometry_filename) geometry = stl.mesh.Mesh.from_file(geometry_filepath) - vertices: list[numpy.array] = list() - normals: list[numpy.array] = list() + vertices: list[numpy.ndarray] = list() + normals: list[numpy.ndarray] = list() triangles: list[list[int]] = list() for i, face in enumerate(geometry.vectors): for vertex in face: diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py index 8ede806..4d5ef19 100644 --- a/src/implementations/annotator_aruco_opencv.py +++ b/src/implementations/annotator_aruco_opencv.py @@ -1,13 +1,12 @@ from .common_aruco_opencv import ArucoOpenCVCommon from src.common import \ Annotator, \ + Annotation, \ + KeyValueMetaAny, \ + KeyValueSimpleAny, \ MCTAnnotatorRuntimeError, \ SeverityLabel, \ StatusMessageSource -from src.common.structures import \ - Annotation, \ - KeyValueMetaAny, \ - KeyValueSimpleAny import cv2.aruco import datetime import logging diff --git a/src/implementations/camera_opencv_capture_device.py b/src/implementations/camera_opencv_capture_device.py index 888c879..6974fa2 100644 --- a/src/implementations/camera_opencv_capture_device.py +++ b/src/implementations/camera_opencv_capture_device.py @@ -1,9 +1,6 @@ from src.common import \ Camera, \ ImageUtils, \ - MCTCameraRuntimeError, \ - StatusMessageSource -from src.common.structures import \ ImageResolution, \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ @@ -15,7 +12,9 @@ KeyValueMetaBool, \ KeyValueMetaEnum, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTCameraRuntimeError, \ + StatusMessageSource, SeverityLabel import cv2 import datetime import logging @@ -286,7 +285,9 @@ def update(self) -> None: grabbed_frame = self._capture.grab() if not grabbed_frame: message: str = "Failed to grab frame." - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) @@ -294,7 +295,9 @@ def update(self) -> None: retrieved_frame, self._image = self._capture.retrieve() if not retrieved_frame: message: str = "Failed to retrieve frame." 
- self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) diff --git a/src/implementations/camera_picamera2.py b/src/implementations/camera_picamera2.py index b1846c7..2f8c8c7 100644 --- a/src/implementations/camera_picamera2.py +++ b/src/implementations/camera_picamera2.py @@ -1,8 +1,5 @@ from src.common import \ Camera, \ - MCTCameraRuntimeError, \ - StatusMessageSource -from src.common.structures import \ ImageResolution, \ KeyValueSimpleAbstract, \ KeyValueSimpleAny, \ @@ -12,7 +9,10 @@ KeyValueMetaAbstract, \ KeyValueMetaBool, \ KeyValueMetaFloat, \ - KeyValueMetaInt + KeyValueMetaInt, \ + MCTCameraRuntimeError, \ + SeverityLabel, \ + StatusMessageSource import datetime import logging import numpy @@ -271,7 +271,9 @@ def update(self) -> None: if self._image is None: message: str = "Failed to grab frame." - self.add_status_message(severity="error", message=message) + self.add_status_message( + severity=SeverityLabel.ERROR, + message=message) self.set_status(Camera.Status.FAILURE) raise MCTCameraRuntimeError(message=message) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py index 734952f..47fa667 100644 --- a/src/implementations/common_aruco_opencv.py +++ b/src/implementations/common_aruco_opencv.py @@ -1,5 +1,4 @@ -from src.common import MathUtils -from src.common.structures import \ +from src.common import \ KeyValueMetaAny, \ KeyValueMetaBool, \ KeyValueMetaEnum, \ @@ -12,6 +11,7 @@ KeyValueSimpleInt, \ KeyValueSimpleString, \ Landmark, \ + MathUtils, \ MCTSerializationError, \ Target import cv2.aruco diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py index 091bd62..7b3325f 100644 --- a/src/implementations/intrinsic_charuco_opencv.py +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -1,11 +1,11 @@ from .common_aruco_opencv import ArucoOpenCVCommon from src.common import \ - IntrinsicCalibrator, \ - MCTIntrinsicCalibrationError -from src.common.structures import \ ImageResolution, \ IntrinsicCalibration, \ - IntrinsicParameters + IntrinsicCalibrator, \ + IntrinsicParameters, \ + MCTIntrinsicCalibrationError, \ + SeverityLabel import cv2 import cv2.aruco import datetime @@ -45,7 +45,7 @@ def _calculate_implementation( parameters=aruco_detector_parameters) if len(marker_corners) <= 0: self._status_message_source.enqueue_status_message( - severity="warning", + severity=SeverityLabel.WARNING, message=f"Image {image_identifier} did not appear to contain any identifiable markers. 
" f"It will be omitted from the calibration.") continue @@ -121,17 +121,17 @@ def _calculate_implementation( timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), image_resolution=image_resolution, calibrated_values=IntrinsicParameters( - focal_length_x_px=charuco_camera_matrix[0, 0], - focal_length_y_px=charuco_camera_matrix[1, 1], - optical_center_x_px=charuco_camera_matrix[0, 2], - optical_center_y_px=charuco_camera_matrix[1, 2], + focal_length_x_px=float(charuco_camera_matrix[0, 0]), + focal_length_y_px=float(charuco_camera_matrix[1, 1]), + optical_center_x_px=float(charuco_camera_matrix[0, 2]), + optical_center_y_px=float(charuco_camera_matrix[1, 2]), radial_distortion_coefficients=[ - charuco_distortion_coefficients[0, 0], - charuco_distortion_coefficients[1, 0], - charuco_distortion_coefficients[4, 0]], + float(charuco_distortion_coefficients[0, 0]), + float(charuco_distortion_coefficients[1, 0]), + float(charuco_distortion_coefficients[4, 0])], tangential_distortion_coefficients=[ - charuco_distortion_coefficients[2, 0], - charuco_distortion_coefficients[3, 0]]), + float(charuco_distortion_coefficients[2, 0]), + float(charuco_distortion_coefficients[3, 0])]), supplemental_data=supplemental_data) return intrinsic_calibration, used_image_identifiers diff --git a/src/pose_solver/api.py b/src/pose_solver/api.py index 784d96f..e9a4f9a 100644 --- a/src/pose_solver/api.py +++ b/src/pose_solver/api.py @@ -1,10 +1,9 @@ from src.common import \ - MCTRequest, \ - MCTResponse -from src.common.structures import \ DetectorFrame, \ IntrinsicParameters, \ Matrix4x4, \ + MCTRequest, \ + MCTResponse, \ Pose, \ Target from pydantic import Field diff --git a/src/pose_solver/exceptions.py b/src/pose_solver/exceptions.py deleted file mode 100644 index 7cac3f8..0000000 --- a/src/pose_solver/exceptions.py +++ /dev/null @@ -1,9 +0,0 @@ -from src.common import MCTError - - -class PoseSolverException(MCTError): - message: str - - def __init__(self, message: str, *args, **kwargs): - super().__init__(args, kwargs) - self.message = message diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 9e8a00f..8871a74 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -1,15 +1,14 @@ -from .exceptions import \ - PoseSolverException from .structures import \ DetectorRecord, \ PoseSolverParameters -from src.common import MathUtils -from src.common.structures import \ +from src.common import \ Annotation, \ DetectorFrame, \ IntrinsicParameters, \ IterativeClosestPointParameters, \ + MathUtils, \ Matrix4x4, \ + MCTError, \ Pose, \ Ray, \ Target @@ -29,6 +28,14 @@ ValueType = TypeVar("ValueType") +class PoseSolverException(MCTError): + message: str + + def __init__(self, message: str, *args, **kwargs): + super().__init__(args, kwargs) + self.message = message + + class PoseSolver: """ Class containing the actual "solver" logic, kept separate from the API. 
diff --git a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py index b450747..1f258e0 100644 --- a/src/pose_solver/pose_solver_api.py +++ b/src/pose_solver/pose_solver_api.py @@ -9,8 +9,9 @@ PoseSolverSetTargetsRequest, \ PoseSolverStartRequest, \ PoseSolverStopRequest -from .exceptions import PoseSolverException -from .pose_solver import PoseSolver +from .pose_solver import \ + PoseSolver, \ + PoseSolverException from .structures import \ PoseSolverConfiguration from src.common import \ @@ -18,8 +19,7 @@ ErrorResponse, \ MCTComponent, \ MCTRequest, \ - MCTResponse -from src.common.structures import \ + MCTResponse, \ Pose from enum import StrEnum import logging diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py index 55033fe..b1bd67c 100644 --- a/src/pose_solver/structures.py +++ b/src/pose_solver/structures.py @@ -1,4 +1,4 @@ -from src.common.structures import \ +from src.common import \ Annotation, \ DetectorFrame import cv2.aruco diff --git a/src/util/generate_target_definition_from_charuco.py b/src/util/generate_target_definition_from_charuco.py index 578311a..369d101 100644 --- a/src/util/generate_target_definition_from_charuco.py +++ b/src/util/generate_target_definition_from_charuco.py @@ -1,5 +1,5 @@ from src.implementations.common_aruco_opencv import ArucoOpenCVCommon -from src.common.structures import Annotation, Landmark, Target +from src.common import Annotation, Landmark, Target board: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 5a047d8..c7368ed 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -1,22 +1,23 @@ -import cv2 -import numpy -import os -import re from src.common import \ + Annotation, \ + Annotator, \ + ImageResolution, \ ImageUtils, \ IntrinsicCalibrator, \ - StatusMessageSource -from src.common.structures import \ - ImageResolution, \ KeyValueSimpleAny, \ KeyValueSimpleString, \ - Annotation + SeverityLabel, \ + StatusMessageSource from src.implementations.common_aruco_opencv import \ ArucoOpenCVCommon from src.implementations.annotator_aruco_opencv import \ ArucoOpenCVAnnotator from src.implementations.intrinsic_charuco_opencv import \ CharucoOpenCVIntrinsicCalibrator +import cv2 +import numpy +import os +import re from tempfile import TemporaryDirectory from typing import Final import unittest @@ -63,7 +64,9 @@ def test(self): image_filepaths[camera_id][frame_id] = image_filepath image_count: int = sum(len(image_filepaths[camera_id]) for camera_id in image_filepaths.keys()) message = f"Found {image_count} image files." - status_message_source.enqueue_status_message(severity="info", message=message) + status_message_source.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) # All cameras have the same imaging parameters. 
# To simplify our lives and ensure a reasonable result, @@ -84,7 +87,7 @@ def test(self): _, calibration_result = calibrator.calculate(image_resolution=IMAGE_RESOLUTION) marker: ArucoOpenCVAnnotator = ArucoOpenCVAnnotator( - configuration={"method": "aruco_opencv"}, + configuration=Annotator.Configuration(method="aruco_opencv"), status_message_source=status_message_source) marker.set_parameters(parameters=MARKER_DETECTION_PARAMETERS) image_marker_snapshots: dict[str, dict[str, list[Annotation]]] = dict() @@ -99,7 +102,9 @@ def test(self): image_marker_snapshots[camera_id][frame_id] = marker_snapshots detection_count += len(marker_snapshots) message = f"{detection_count} detections." - status_message_source.enqueue_status_message(severity="info", message=message) + status_message_source.enqueue_status_message( + severity=SeverityLabel.INFO, + message=message) print(message) # Constraint: Reference board must be visible to all cameras for first frame_id (frame_0) diff --git a/test/test_math_utils.py b/test/test_math_utils.py index 4bb1406..3299104 100644 --- a/test/test_math_utils.py +++ b/test/test_math_utils.py @@ -1,6 +1,6 @@ -from src.common import MathUtils -from src.common.structures import \ +from src.common import \ IterativeClosestPointParameters, \ + MathUtils, \ Ray import datetime import numpy diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index 04c0162..0d018c0 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -1,5 +1,5 @@ from src.pose_solver.pose_solver import PoseSolver -from src.common.structures import \ +from src.common import \ Annotation, \ DetectorFrame, \ ImageResolution, \ From 45dd941c132f1980d5d102bc0dc2d18cd7a1f52b Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 17 Jul 2025 16:44:07 -0400 Subject: [PATCH 11/33] WIP: ExtrinsicCalibrator --- src/common/extrinsic_calibrator.py | 51 ++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 src/common/extrinsic_calibrator.py diff --git a/src/common/extrinsic_calibrator.py b/src/common/extrinsic_calibrator.py new file mode 100644 index 0000000..210f242 --- /dev/null +++ b/src/common/extrinsic_calibrator.py @@ -0,0 +1,51 @@ +from src.common import \ + StatusMessageSource +import abc +import datetime +from enum import StrEnum +import logging +from pydantic import BaseModel, Field +from typing import Final + + +logger = logging.getLogger(__name__) + + +class _ImageState(StrEnum): + IGNORE: Final[int] = "ignore" + SELECT: Final[int] = "select" + DELETE: Final[int] = "delete" # stage for deletion + + +class _ImageMetadata(BaseModel): + identifier: str = Field() + label: str = Field(default_factory=str) # human-readable label + timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + state: _ImageState = Field(default=_ImageState.SELECT) + + +class ExtrinsicCalibrator(abc.ABC): + + _image_filepaths: dict[tuple[str, str], str] # (detector_id, frame_id) -> image_filepath + _status_message_source: StatusMessageSource + + DATA_FILENAME: Final[str] = "extrinsic_calibration_data.json" + + # data: + # per detector: + # initial_frame transform to reference_target + # final transform to reference_target + # per frame: + # image + # (marker_id,2d_points)s + # final (frame_id,marker_id,3d_points)s + # + # input data: + # per detector: + # per frame: + # PNG: image + # + # output data: + # per detector: + # JSON: transform to reference_target + # JSON: Additional stats, inc. 
reference_target definition From a0affda27dd391b2b0338e8ea3f888949e377f79 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 6 Aug 2025 16:37:59 -0400 Subject: [PATCH 12/33] WIP (May not be in working state) --- src/common/__init__.py | 14 +- src/common/annotator.py | 2 +- ...intrinsic_calibrator.py => calibration.py} | 408 ++++++++++++------ src/common/extrinsic_calibrator.py | 51 --- src/common/image_processing.py | 76 ---- src/common/math.py | 73 +++- src/gui/panels/calibrator_panel.py | 4 +- .../specialized/calibration_image_table.py | 2 +- .../specialized/calibration_result_table.py | 2 +- 9 files changed, 359 insertions(+), 273 deletions(-) rename src/common/{intrinsic_calibrator.py => calibration.py} (71%) delete mode 100644 src/common/extrinsic_calibrator.py diff --git a/src/common/__init__.py b/src/common/__init__.py index 4e30083..f58e953 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -14,6 +14,12 @@ TimestampGetResponse, \ TimeSyncStartRequest, \ TimeSyncStopRequest +from .calibration import \ + ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ + IntrinsicCalibration, \ + IntrinsicCalibrator, \ + MCTIntrinsicCalibrationError from .camera import \ Camera, \ MCTCameraRuntimeError @@ -21,13 +27,9 @@ Annotation, \ ImageFormat, \ ImageResolution, \ - ImageUtils, \ - IntrinsicCalibration, \ - IntrinsicParameters -from .intrinsic_calibrator import \ - IntrinsicCalibrator, \ - MCTIntrinsicCalibrationError + ImageUtils from .math import \ + IntrinsicParameters, \ IterativeClosestPointParameters, \ Landmark, \ MathUtils, \ diff --git a/src/common/annotator.py b/src/common/annotator.py index 4d9872e..4b10a26 100644 --- a/src/common/annotator.py +++ b/src/common/annotator.py @@ -34,7 +34,7 @@ def __init__(self, message: str, *args): class Annotator(abc.ABC): """ - Functions may raise MCTMarkerRuntimeError + Functions may raise MCTAnnotatorRuntimeError """ Configuration: type[_Configuration] = _Configuration diff --git a/src/common/intrinsic_calibrator.py b/src/common/calibration.py similarity index 71% rename from src/common/intrinsic_calibrator.py rename to src/common/calibration.py index 81e345a..ebf8c01 100644 --- a/src/common/intrinsic_calibrator.py +++ b/src/common/calibration.py @@ -1,7 +1,9 @@ from .image_processing import \ ImageResolution, \ - ImageUtils, \ - IntrinsicCalibration + ImageUtils +from .math import \ + IntrinsicParameters, \ + Matrix4x4 from .serialization import \ IOUtils from .status import \ @@ -32,6 +34,29 @@ def __init__(self, message: str, *args): self.message = message +class ExtrinsicCalibrationDetectorResult(BaseModel): + detector_label: str = Field() + detector_to_reference: Matrix4x4 = Field() + + +class ExtrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + calibrated_values: list[ExtrinsicCalibrationDetectorResult] = Field() + supplemental_data: dict = Field() + + +class IntrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + image_resolution: ImageResolution = Field() + calibrated_values: IntrinsicParameters = Field() + supplemental_data: dict = Field() + + +# ===================================================================================================================== +# Internal structures applicable to both intrinsic and extrinsic calibrations +# ===================================================================================================================== + + class _Configuration(BaseModel): data_path: str = Field() @@ -44,26 +69,22 @@ class _ImageState(StrEnum): class 
_ImageMetadata(BaseModel): identifier: str = Field() - label: str = Field(default_factory=str) # human-readable label + detector_label: str = Field() + image_resolution: ImageResolution = Field() + image_label: str = Field(default_factory=str) # human-readable label timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: _ImageState = Field(default=_ImageState.SELECT) class _ResultState(StrEnum): - # indicate to use this calibration (as opposed to simply storing it) - # normally there shall only ever be one ACTIVE calibration for a given image resolution - ACTIVE = "active" - - # store the calibration, but don't mark it for use - RETAIN = "retain" - - # stage for deletion - DELETE = "delete" + ACTIVE = "active" # will be stored AND marked for use. Only one result expected to be active per image resolution. + RETAIN = "retain" # store, but do not use + DELETE = "delete" # stage for deletion class _ResultMetadata(BaseModel): identifier: str = Field() - label: str = Field(default_factory=str) + result_label: str = Field(default_factory=str) timestamp_utc_iso8601: str = Field( default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) image_identifiers: list[str] = Field(default_factory=list) @@ -73,24 +94,152 @@ def timestamp_utc(self): return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) -class _DataMapValue(BaseModel): +class Calibrator(abc.ABC): + + _status_message_source: StatusMessageSource + + def __init__( + self, + status_message_source: StatusMessageSource + ): + self._status_message_source = status_message_source + + def _delete_if_exists(self, filepath: str): + try: + os.remove(filepath) + except FileNotFoundError as e: + logger.error(e) + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=f"Failed to remove a file from the calibrator because it does not exist. " + f"See its internal log for details.") + except OSError as e: + logger.error(e) + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=f"Failed to remove a file from the calibrator due to an unexpected reason. " + f"See its internal log for details.") + + def _exists_on_filesystem( + self, + path: str, + pathtype: IOUtils.PathType, + create_path: bool = False + ) -> bool: + return IOUtils.exists( + path=path, + pathtype=pathtype, + create_path=create_path, + on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=msg), + on_error_for_dev=logger.error) + + def _load_dict_from_filepath( + self, + filepath: str + ) -> tuple[dict, bool]: + """ + :return: + dict containing existing data (or empty if an unexpected error occurred) + bool indicating whether if loaded or if it can be created without overwriting existing data. False otherwise. + """ + if not os.path.exists(filepath): + return dict(), True + elif not os.path.isfile(filepath): + logger.critical(f"Calibration map file location {filepath} exists but is not a file.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.CRITICAL, + message="Filepath location for calibration map exists but is not a file. 
" + "Most likely a directory exists at that location, " + "and it needs to be manually removed.") + return dict(), False + json_dict: dict = IOUtils.hjson_read( + filepath=filepath, + on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=msg), + on_error_for_dev=logger.error) + if not json_dict: + logger.error(f"Failed to load calibration map from file {filepath}.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Failed to load calibration map from file.") + return dict(), False + return json_dict, True + + def _save_dict_to_filepath( + self, + filepath: str, + json_dict: dict + ) -> None: + IOUtils.json_write( + filepath=filepath, + json_dict=json_dict, + on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=msg), + on_error_for_dev=logger.error) + + def _save_image_to_filepath( + self, + filepath: str, + image: numpy.ndarray + ) -> bool: + """ + Returns true if successful, False otherwise. + """ + # Before making any changes to the calibration map, make sure folders exist + if not self._exists_on_filesystem(path=os.path.dirname(filepath), pathtype="path", create_path=True): + message = "Failed to create storage location for input image." + logger.error(message) + self._status_message_source.enqueue_status_message(severity=SeverityLabel.ERROR, message=message) + return False + # Also make sure that this file does not somehow already exist (highly unlikely) + if os.path.exists(filepath): + logger.error(f"Image {filepath} appears to already exist. This is never expected to occur.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Image appears to already exist. This is never expected to occur. 
" + "Please try again, and if this error continues to occur then please report a bug.") + return False + image_bytes: bytes + image_bytes = ImageUtils.image_to_bytes(image_data=image, image_format=IntrinsicCalibrator.IMAGE_FORMAT) + try: + with (open(filepath, 'wb') as in_file): + in_file.write(image_bytes) + except IOError as e: + logger.error(f"Failed to save image to {filepath}, reason: {str(e)}.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Failed to save image - see calibration log for more details.") + return False + return True + + +# ===================================================================================================================== +# Intrinsic calibration +# ===================================================================================================================== + + +class _IntrinsicDataMapValue(BaseModel): image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) -class _DataMapEntry(BaseModel): +class _IntrinsicDataMapEntry(BaseModel): key: ImageResolution = Field() - value: _DataMapValue = Field() + value: _IntrinsicDataMapValue = Field() -class _DataMap(BaseModel): - entries: list[_DataMapEntry] = Field(default_factory=list) +class _IntrinsicDataMap(BaseModel): + entries: list[_IntrinsicDataMapEntry] = Field(default_factory=list) - def as_dict(self) -> dict[ImageResolution, _DataMapValue]: - return_value: dict[ImageResolution, _DataMapValue] = dict() + def as_dict(self) -> dict[ImageResolution, _IntrinsicDataMapValue]: + return_value: dict[ImageResolution, _IntrinsicDataMapValue] = dict() for entry in self.entries: if entry.key not in return_value: - return_value[entry.key] = _DataMapValue() + return_value[entry.key] = _IntrinsicDataMapValue() for image_metadata in entry.value.image_metadata_list: return_value[entry.key].image_metadata_list.append(image_metadata) for result_metadata in entry.value.result_metadata_list: @@ -98,26 +247,26 @@ def as_dict(self) -> dict[ImageResolution, _DataMapValue]: return return_value @staticmethod - def from_dict(in_dict: dict[ImageResolution, _DataMapValue]): - entries: list[_DataMapEntry] = list() + def from_dict(in_dict: dict[ImageResolution, _IntrinsicDataMapValue]): + entries: list[_IntrinsicDataMapEntry] = list() for key in in_dict.keys(): - entries.append(_DataMapEntry(key=key, value=in_dict[key])) - return _DataMap(entries=entries) + entries.append(_IntrinsicDataMapEntry(key=key, value=in_dict[key])) + return _IntrinsicDataMap(entries=entries) -class IntrinsicCalibrator(abc.ABC): +class IntrinsicCalibrator(Calibrator, abc.ABC): Configuration: type[_Configuration] = _Configuration ImageState: type[_ImageState] = _ImageState ImageMetadata: type[_ImageMetadata] = _ImageMetadata ResultState: type[_ResultState] = _ResultState ResultMetadata: type[_ResultMetadata] = _ResultMetadata - DataMap: type[_DataMap] = _DataMap + DataMap: type[_IntrinsicDataMap] = _IntrinsicDataMap - _configuration: Configuration - _calibration_map: dict[ImageResolution, _DataMapValue] - _status_message_source: StatusMessageSource + _calibration_map: dict[ImageResolution, _IntrinsicDataMapValue] - CALIBRATION_MAP_FILENAME: Final[str] = "calibration_map.json" + _data_path: str + _data_ledger_filepath: str + CALIBRATION_MAP_FILENAME: Final[str] = "intrinsic_calibrations.json" IMAGE_FORMAT: Final[str] = ".png" # work in lossless image format RESULT_FORMAT: Final[str] = ".json" @@ -127,15 +276,18 @@ def 
__init__( configuration: Configuration, status_message_source: StatusMessageSource ): - self._configuration = configuration - self._status_message_source = status_message_source - if not self._exists_on_filesystem(path=self._configuration.data_path, pathtype="path", create_path=True): + super().__init__(status_message_source=status_message_source) + + self._data_path = configuration.data_path + if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): self._status_message_source.enqueue_status_message( severity=SeverityLabel.CRITICAL, message="Data path does not exist and could not be created.") - detailed_message: str = f"{self._configuration.data_path} does not exist and could not be created." + detailed_message: str = f"{self._data_path} does not exist and could not be created." logger.critical(detailed_message) raise RuntimeError(detailed_message) + + self._data_ledger_filepath = os.path.join(self._data_path, IntrinsicCalibrator.CALIBRATION_MAP_FILENAME) if not self.load(): message: str = "The calibration map could not be loaded or created. "\ "In order to avoid data loss, the software will now abort. " \ @@ -146,31 +298,23 @@ def __init__( def add_image( self, - image_base64: str + image_base64: str, + detector_label: str = "" ) -> str: # id of image image_data: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") map_key: ImageResolution = ImageResolution(x_px=image_data.shape[1], y_px=image_data.shape[0]) - # Before making any changes to the calibration map, make sure folders exist, - # and that this file does not somehow already exist (highly unlikely) - key_path: str = self._path_for_map_key(map_key=map_key) - if not self._exists_on_filesystem(path=key_path, pathtype="path", create_path=True): - raise MCTIntrinsicCalibrationError(message=f"Failed to create storage location for input image.") image_identifier: str = str(uuid.uuid4()) - image_filepath = self._image_filepath( - map_key=map_key, - image_identifier=image_identifier) - if os.path.exists(image_filepath): - raise MCTIntrinsicCalibrationError( - message=f"Image {image_identifier} appears to already exist. This is never expected to occur. 
" - f"Please try again, and if this error continues to occur then please report a bug.") + image_filepath = self._image_filepath(map_key=map_key, image_identifier=image_identifier) + self._save_image_to_filepath( + filepath=image_filepath, + image=image_data) if map_key not in self._calibration_map: - self._calibration_map[map_key] = _DataMapValue() + self._calibration_map[map_key] = _IntrinsicDataMapValue() self._calibration_map[map_key].image_metadata_list.append( - IntrinsicCalibrator.ImageMetadata(identifier=image_identifier)) - # noinspection PyTypeChecker - image_bytes = ImageUtils.image_to_bytes(image_data=image_data, image_format=IntrinsicCalibrator.IMAGE_FORMAT) - with (open(image_filepath, 'wb') as in_file): - in_file.write(image_bytes) + IntrinsicCalibrator.ImageMetadata( + identifier=image_identifier, + detector_label=detector_label, + image_resolution=map_key)) self.save() return image_identifier @@ -192,7 +336,7 @@ def calculate( map_key=calibration_key, result_identifier=result_identifier) - calibration_value: _DataMapValue = self._calibration_map[calibration_key] + calibration_value: _IntrinsicDataMapValue = self._calibration_map[calibration_key] # don't load images right away in case of memory constraints image_identifiers: list[str] = list() for image_metadata in calibration_value.image_metadata_list: @@ -238,25 +382,9 @@ def _calculate_implementation( ) -> tuple[IntrinsicCalibration, list[str]]: # image_identifiers that were actually used in calibration pass - def _delete_if_exists(self, filepath: str): - try: - os.remove(filepath) - except FileNotFoundError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Failed to remove a file from the calibrator because it does not exist. " - f"See its internal log for details.") - except OSError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Failed to remove a file from the calibrator due to an unexpected reason. " - f"See its internal log for details.") - def delete_staged(self) -> None: for calibration_key in self._calibration_map.keys(): - calibration_value: _DataMapValue = self._calibration_map[calibration_key] + calibration_value: _IntrinsicDataMapValue = self._calibration_map[calibration_key] image_indices_to_delete: list = list() for image_index, image in enumerate(calibration_value.image_metadata_list): if image.state == _ImageState.DELETE: @@ -277,21 +405,6 @@ def delete_staged(self) -> None: del calibration_value.result_metadata_list[i] self.save() - def _exists_on_filesystem( - self, - path: str, - pathtype: IOUtils.PathType, - create_path: bool = False - ) -> bool: - return IOUtils.exists( - path=path, - pathtype=pathtype, - create_path=create_path, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error) - # noinspection DuplicatedCode def get_image( self, @@ -440,9 +553,9 @@ def _image_filepath( map_key: ImageResolution, image_identifier: str ) -> str: - key_path: str = self._path_for_map_key(map_key=map_key) return os.path.join( - key_path, + self._data_path, + str(map_key), image_identifier + IntrinsicCalibrator.IMAGE_FORMAT) def list_resolutions(self) -> list[ImageResolution]: @@ -475,29 +588,10 @@ def load(self) -> bool: """ :return: True if loaded or if it can be created without overwriting existing data. False otherwise. 
""" - calibration_map_filepath: str = self._map_filepath() - if not os.path.exists(calibration_map_filepath): - self._calibration_map = dict() - return True - elif not os.path.isfile(calibration_map_filepath): - logger.critical(f"Calibration map file location {calibration_map_filepath} exists but is not a file.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.CRITICAL, - message="Filepath location for calibration map exists but is not a file. " - "Most likely a directory exists at that location, " - "and it needs to be manually removed.") - return False - json_dict: dict = IOUtils.hjson_read( - filepath=calibration_map_filepath, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error) - if not json_dict: - logger.error(f"Failed to load calibration map from file {calibration_map_filepath}.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to load calibration map from file.") + json_dict: dict + load_success: bool + json_dict, load_success = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) + if not load_success: return False calibration_map: IntrinsicCalibrator.DataMap try: @@ -511,33 +605,20 @@ def load(self) -> bool: self._calibration_map = calibration_map.as_dict() return True - def _map_filepath(self) -> str: - return os.path.join(self._configuration.data_path, IntrinsicCalibrator.CALIBRATION_MAP_FILENAME) - - def _path_for_map_key( - self, - map_key: ImageResolution - ) -> str: - return os.path.join(self._configuration.data_path, str(map_key)) - def _result_filepath( self, map_key: ImageResolution, result_identifier: str ) -> str: - key_path: str = self._path_for_map_key(map_key=map_key) return os.path.join( - key_path, + self._data_path, + str(map_key), result_identifier + IntrinsicCalibrator.RESULT_FORMAT) def save(self) -> None: - IOUtils.json_write( - filepath=self._map_filepath(), - json_dict=IntrinsicCalibrator.DataMap.from_dict(self._calibration_map).model_dump(), - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error) + return self._save_dict_to_filepath( + filepath=self._data_ledger_filepath, + json_dict=IntrinsicCalibrator.DataMap.from_dict(self._calibration_map).model_dump()) # noinspection DuplicatedCode def update_image_metadata( @@ -552,7 +633,7 @@ def update_image_metadata( if image.identifier == image_identifier: image.state = image_state if image_label is not None: - image.label = image_label + image.image_label = image_label found_count += 1 break if found_count < 1: @@ -578,7 +659,7 @@ def update_result_metadata( if result.identifier == result_identifier: result.state = result_state if result_label is not None: - result.label = result_label + result.result_label = result_label found_count += 1 matching_map_keys.add(map_key) break @@ -603,3 +684,66 @@ def update_result_metadata( "It may be prudent to either manually correct it, or recreate it.") self.save() + + +# ===================================================================================================================== +# Extrinsic calibration +# ===================================================================================================================== + + +class _ExtrinsicDataListing(BaseModel): + image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) + 
result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) + + +class ExtrinsicCalibrator(abc.ABC): + Configuration: type[_Configuration] = _Configuration + ImageState: type[_ImageState] = _ImageState + ImageMetadata: type[_ImageMetadata] = _ImageMetadata + ResultState: type[_ResultState] = _ResultState + ResultMetadata: type[_ResultMetadata] = _ResultMetadata + + _image_filepaths: dict[tuple[str, str], str] # (detector_id, timestamp_iso8601) -> image_filepath + + DATA_FILENAME: Final[str] = "extrinsic_calibrations.json" + + def __init__( + self, + configuration: Configuration, + status_message_source: StatusMessageSource + ): + self._configuration = configuration + self._status_message_source = status_message_source + if not self._exists_on_filesystem(path=self._configuration.data_path, pathtype="path", create_path=True): + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.CRITICAL, + message="Data path does not exist and could not be created.") + detailed_message: str = f"{self._configuration.data_path} does not exist and could not be created." + logger.critical(detailed_message) + raise RuntimeError(detailed_message) + if not self.load(): + message: str = "The calibration map could not be loaded or created. "\ + "In order to avoid data loss, the software will now abort. " \ + "Please manually correct or remove the file in the filesystem." + logger.critical(message) + self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) + raise RuntimeError(message) + + # data: + # per detector: + # initial_frame transform to reference_target + # final transform to reference_target + # per frame: + # image + # (marker_id,2d_points)s + # final (frame_id,marker_id,3d_points)s + # + # input data: + # per detector: + # per frame: + # PNG: image + # + # output data: + # per detector: + # JSON: transform to reference_target + # JSON: Additional stats, inc. reference_target definition diff --git a/src/common/extrinsic_calibrator.py b/src/common/extrinsic_calibrator.py deleted file mode 100644 index 210f242..0000000 --- a/src/common/extrinsic_calibrator.py +++ /dev/null @@ -1,51 +0,0 @@ -from src.common import \ - StatusMessageSource -import abc -import datetime -from enum import StrEnum -import logging -from pydantic import BaseModel, Field -from typing import Final - - -logger = logging.getLogger(__name__) - - -class _ImageState(StrEnum): - IGNORE: Final[int] = "ignore" - SELECT: Final[int] = "select" - DELETE: Final[int] = "delete" # stage for deletion - - -class _ImageMetadata(BaseModel): - identifier: str = Field() - label: str = Field(default_factory=str) # human-readable label - timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - state: _ImageState = Field(default=_ImageState.SELECT) - - -class ExtrinsicCalibrator(abc.ABC): - - _image_filepaths: dict[tuple[str, str], str] # (detector_id, frame_id) -> image_filepath - _status_message_source: StatusMessageSource - - DATA_FILENAME: Final[str] = "extrinsic_calibration_data.json" - - # data: - # per detector: - # initial_frame transform to reference_target - # final transform to reference_target - # per frame: - # image - # (marker_id,2d_points)s - # final (frame_id,marker_id,3d_points)s - # - # input data: - # per detector: - # per frame: - # PNG: image - # - # output data: - # per detector: - # JSON: transform to reference_target - # JSON: Additional stats, inc. 
reference_target definition diff --git a/src/common/image_processing.py b/src/common/image_processing.py index 393ebea..c6a4533 100644 --- a/src/common/image_processing.py +++ b/src/common/image_processing.py @@ -2,7 +2,6 @@ import cv2 from enum import StrEnum import logging -import math # Python's math module, not the one from this project! import numpy from pydantic import BaseModel, Field from typing import ClassVar, Literal, Final @@ -83,81 +82,6 @@ def from_str(in_str: str) -> 'ImageResolution': return ImageResolution(x_px=x_px, y_px=y_px) -class IntrinsicParameters(BaseModel): - """ - Camera intrinsic parameters (focal length, optical center, distortion coefficients). - See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html - See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 - """ - - focal_length_x_px: float = Field() - focal_length_y_px: float = Field() - optical_center_x_px: float = Field() - optical_center_y_px: float = Field() - - radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV - - tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV - - def as_array(self) -> list[float]: - return_value: list[float] = [ - self.focal_length_x_px, - self.focal_length_y_px, - self.optical_center_x_px, - self.optical_center_y_px] - return_value += self.get_distortion_coefficients() - return return_value - - def get_matrix(self) -> list[list[float]]: - """calibration matrix expected by OpenCV in some operations""" - return \ - [[self.focal_length_x_px, 0.0, self.optical_center_x_px], - [0.0, self.focal_length_y_px, self.optical_center_y_px], - [0.0, 0.0, 1.0]] - - def get_distortion_coefficients(self) -> list[float]: - """ - Distortion coefficients in array format expected by OpenCV in some operations. - See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d - calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with - """ - coefficients: list[float] = [ - self.radial_distortion_coefficients[0], - self.radial_distortion_coefficients[1], - self.tangential_distortion_coefficients[0], - self.tangential_distortion_coefficients[1]] - coefficients += self.radial_distortion_coefficients[2:] - return coefficients - - @staticmethod - def generate_zero_parameters( - resolution_x_px: int, - resolution_y_px: int, - fov_x_degrees: float = 45.0, - fov_y_degrees: float = 45.0 - ) -> "IntrinsicParameters": - optical_center_x_px: int = int(round(resolution_x_px/2.0)) - fov_x_radians: float = fov_x_degrees * math.pi / 180.0 - focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) - optical_center_y_px: int = int(round(resolution_y_px/2.0)) - fov_y_radians: float = fov_y_degrees * math.pi / 180.0 - focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) - return IntrinsicParameters( - focal_length_x_px=focal_length_x_px, - focal_length_y_px=focal_length_y_px, - optical_center_x_px=optical_center_x_px, - optical_center_y_px=optical_center_y_px, - radial_distortion_coefficients=[0.0, 0.0, 0.0], - tangential_distortion_coefficients=[0.0, 0.0]) - - -class IntrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - image_resolution: ImageResolution = Field() - calibrated_values: IntrinsicParameters = Field() - supplemental_data: dict = Field() - - class ImageUtils: """ A "class" to group related static functions, like in a namespace. 
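IntrinsicParameters and its helpers move out of image_processing.py and into math.py below, while staying exported from src.common per the __init__.py change earlier in this patch. A minimal usage sketch under that assumption; the 1280x720 resolution is an arbitrary placeholder value:

import numpy
from src.common import IntrinsicParameters

# Zeroed distortion parameters for a hypothetical 1280x720 camera, using the
# default 45-degree fields of view from generate_zero_parameters().
intrinsics: IntrinsicParameters = IntrinsicParameters.generate_zero_parameters(
    resolution_x_px=1280,
    resolution_y_px=720)

# get_matrix() and get_distortion_coefficients() return plain lists; OpenCV
# calls generally expect numpy arrays, so convert at the call site.
camera_matrix: numpy.ndarray = numpy.asarray(intrinsics.get_matrix())
distortion_coefficients: numpy.ndarray = numpy.asarray(intrinsics.get_distortion_coefficients())
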
diff --git a/src/common/math.py b/src/common/math.py index cd91235..d6c2dd5 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -1,7 +1,6 @@ -from .image_processing import \ - Annotation, \ - IntrinsicParameters +from .image_processing import Annotation import cv2 +import math # Python's math module, not the one from this project! import numpy from pydantic import BaseModel, Field from scipy.spatial.transform import Rotation @@ -11,6 +10,74 @@ _DEFAULT_EPSILON: Final[float] = 0.0001 +class IntrinsicParameters(BaseModel): + """ + Camera intrinsic parameters (focal length, optical center, distortion coefficients). + See OpenCV's documentation: https://docs.opencv.org/4.x/dc/dbb/tutorial_py_calibration.html + See Wikipedia article: https://en.wikipedia.org/wiki/Distortion_%28optics%29 + """ + + focal_length_x_px: float = Field() + focal_length_y_px: float = Field() + optical_center_x_px: float = Field() + optical_center_y_px: float = Field() + + radial_distortion_coefficients: list[float] = Field() # k1, k2, k3 etc in OpenCV + + tangential_distortion_coefficients: list[float] = Field() # p1, p2 in OpenCV + + def as_array(self) -> list[float]: + return_value: list[float] = [ + self.focal_length_x_px, + self.focal_length_y_px, + self.optical_center_x_px, + self.optical_center_y_px] + return_value += self.get_distortion_coefficients() + return return_value + + def get_matrix(self) -> list[list[float]]: + """calibration matrix expected by OpenCV in some operations""" + return \ + [[self.focal_length_x_px, 0.0, self.optical_center_x_px], + [0.0, self.focal_length_y_px, self.optical_center_y_px], + [0.0, 0.0, 1.0]] + + def get_distortion_coefficients(self) -> list[float]: + """ + Distortion coefficients in array format expected by OpenCV in some operations. 
+ See https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d + calibrateCamera() documentation describes order of distortion coefficients that OpenCV works with + """ + coefficients: list[float] = [ + self.radial_distortion_coefficients[0], + self.radial_distortion_coefficients[1], + self.tangential_distortion_coefficients[0], + self.tangential_distortion_coefficients[1]] + coefficients += self.radial_distortion_coefficients[2:] + return coefficients + + @staticmethod + def generate_zero_parameters( + resolution_x_px: int, + resolution_y_px: int, + fov_x_degrees: float = 45.0, + fov_y_degrees: float = 45.0 + ) -> "IntrinsicParameters": + optical_center_x_px: int = int(round(resolution_x_px/2.0)) + fov_x_radians: float = fov_x_degrees * math.pi / 180.0 + focal_length_x_px = (resolution_x_px / 2.0) / math.tan(fov_x_radians / 2.0) + optical_center_y_px: int = int(round(resolution_y_px/2.0)) + fov_y_radians: float = fov_y_degrees * math.pi / 180.0 + focal_length_y_px = (resolution_y_px / 2.0) / math.tan(fov_y_radians / 2.0) + return IntrinsicParameters( + focal_length_x_px=focal_length_x_px, + focal_length_y_px=focal_length_y_px, + optical_center_x_px=optical_center_x_px, + optical_center_y_px=optical_center_y_px, + radial_distortion_coefficients=[0.0, 0.0, 0.0], + tangential_distortion_coefficients=[0.0, 0.0]) + + class IterativeClosestPointParameters(BaseModel): # ICP will stop after this many iterations termination_iteration_count: int = Field() diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index c4e986b..c160a76 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -561,7 +561,7 @@ def _update_ui_controls(self) -> None: else: image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index] self._image_label_textbox.Enable(True) - self._image_label_textbox.textbox.SetValue(image_metadata.label) + self._image_label_textbox.textbox.SetValue(image_metadata.image_label) self._image_state_selector.Enable(True) self._image_state_selector.selector.SetStringSelection(image_metadata.state.name) self._image_update_button.Enable(True) @@ -590,7 +590,7 @@ def _update_ui_controls(self) -> None: result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index] self._result_display_textbox.Enable(True) self._result_label_textbox.Enable(True) - self._result_label_textbox.textbox.SetValue(result_metadata.label) + self._result_label_textbox.textbox.SetValue(result_metadata.result_label) self._result_state_selector.Enable(True) self._result_state_selector.selector.SetStringSelection(result_metadata.state.name) self._result_update_button.Enable(True) diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index 03cd314..f41dc00 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -35,7 +35,7 @@ def _set_row_contents( self.table.SetCellValue( row=row_index, col=_COL_IDX_LABEL, - s=row_content.label) + s=row_content.image_label) self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, diff --git a/src/gui/panels/specialized/calibration_result_table.py b/src/gui/panels/specialized/calibration_result_table.py index d7e69e9..bf2ebc4 100644 --- a/src/gui/panels/specialized/calibration_result_table.py +++ b/src/gui/panels/specialized/calibration_result_table.py @@ -35,7 +35,7 @@ def _set_row_contents( 
self.table.SetCellValue( row=row_index, col=_COL_IDX_LABEL, - s=row_content.label) + s=row_content.result_label) self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, From a5ef48857454d6b9a9033dbf730b056285aa5157 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Fri, 8 Aug 2025 12:51:46 -0400 Subject: [PATCH 13/33] MNT: Consolidate common calibration functionality --- src/common/__init__.py | 1 + src/common/calibration.py | 947 +++++++++--------- src/detector/detector.py | 12 +- .../extrinsic_charuco_opencv.py | 54 + .../intrinsic_charuco_opencv.py | 21 +- test/test_extrinsic_calibration.py | 13 - 6 files changed, 540 insertions(+), 508 deletions(-) create mode 100644 src/implementations/extrinsic_charuco_opencv.py diff --git a/src/common/__init__.py b/src/common/__init__.py index f58e953..da3162f 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -17,6 +17,7 @@ from .calibration import \ ExtrinsicCalibration, \ ExtrinsicCalibrationDetectorResult, \ + ExtrinsicCalibrator, \ IntrinsicCalibration, \ IntrinsicCalibrator, \ MCTIntrinsicCalibrationError diff --git a/src/common/calibration.py b/src/common/calibration.py index ebf8c01..43fb33c 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -1,4 +1,5 @@ from .image_processing import \ + ImageFormat, \ ImageResolution, \ ImageUtils from .math import \ @@ -13,13 +14,11 @@ import abc import datetime from enum import StrEnum -import json -from json import JSONDecodeError import logging import numpy import os from pydantic import BaseModel, Field, ValidationError -from typing import Final +from typing import Final, Optional import uuid @@ -34,22 +33,7 @@ def __init__(self, message: str, *args): self.message = message -class ExtrinsicCalibrationDetectorResult(BaseModel): - detector_label: str = Field() - detector_to_reference: Matrix4x4 = Field() - - -class ExtrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - calibrated_values: list[ExtrinsicCalibrationDetectorResult] = Field() - supplemental_data: dict = Field() - - -class IntrinsicCalibration(BaseModel): - timestamp_utc: str = Field() - image_resolution: ImageResolution = Field() - calibrated_values: IntrinsicParameters = Field() - supplemental_data: dict = Field() +_RESULT_FORMAT: Final[str] = ".json" # ===================================================================================================================== @@ -69,12 +53,16 @@ class _ImageState(StrEnum): class _ImageMetadata(BaseModel): identifier: str = Field() + filepath: str = Field() detector_label: str = Field() - image_resolution: ImageResolution = Field() + resolution: ImageResolution = Field() image_label: str = Field(default_factory=str) # human-readable label timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: _ImageState = Field(default=_ImageState.SELECT) + def is_selected(self): + return self.state == _ImageState.SELECT + class _ResultState(StrEnum): ACTIVE = "active" # will be stored AND marked for use. Only one result expected to be active per image resolution. 
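The shared image metadata now carries its SELECT/IGNORE/DELETE state plus an is_selected() helper, and the ledger accessors defined later in this file expose those entries to subclasses. A minimal sketch of how a concrete calibrator might gather the images that should feed a calculation, assuming IntrinsicCalibrator derives from the shared ledger-backed base introduced in this patch:

from src.common import IntrinsicCalibrator

def selected_image_identifiers(calibrator: IntrinsicCalibrator) -> list[str]:
    # list_image_metadata() exposes the ledger's image entries; keep only those
    # whose state is SELECT, which is the check is_selected() performs.
    return [
        image_metadata.identifier
        for image_metadata in calibrator.list_image_metadata()
        if image_metadata.is_selected()]

Entries staged as DELETE are intentionally excluded here; they are only removed from disk and from the ledger once delete_staged() runs.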
@@ -84,6 +72,8 @@ class _ResultState(StrEnum): class _ResultMetadata(BaseModel): identifier: str = Field() + filepath: str = Field() + resolution: ImageResolution | None = Field(default=None) # Used in intrinsic, not currently used in extrinsic result_label: str = Field(default_factory=str) timestamp_utc_iso8601: str = Field( default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) @@ -94,17 +84,97 @@ def timestamp_utc(self): return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) -class Calibrator(abc.ABC): +class _DataLedger(BaseModel): + image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) + result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) + + +class AbstractCalibrator(abc.ABC): + + _data_path: str + + _DATA_LEDGER_FILENAME: Final[str] = "data_ledger.json" + _data_ledger: _DataLedger + _data_ledger_filepath: str _status_message_source: StatusMessageSource def __init__( self, - status_message_source: StatusMessageSource + status_message_source: StatusMessageSource, + data_path: str ): self._status_message_source = status_message_source - def _delete_if_exists(self, filepath: str): + self._data_path = data_path + if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.CRITICAL, + message="Data path does not exist and could not be created.") + detailed_message: str = f"{self._data_path} does not exist and could not be created." + logger.critical(detailed_message) + raise RuntimeError(detailed_message) + + self._data_ledger_filepath = os.path.join(self._data_path, AbstractCalibrator._DATA_LEDGER_FILENAME) + if not self._load_data_ledger(): + message: str = "The data ledger could not be loaded or created. "\ + "In order to avoid data loss, the software will now abort. " \ + "Please manually correct or remove the file in the filesystem." + logger.critical(message) + self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) + raise RuntimeError(message) + + def _add_image( + self, + image: numpy.ndarray, + metadata: _ImageMetadata, + ) -> bool: + """ + Helper for saving images consistently across different types of calibrators + Returns true if successful, False otherwise. + """ + # Before making any changes to the data ledger, make sure folders exist + if not self._exists_on_filesystem(path=os.path.dirname(metadata.filepath), pathtype="path", create_path=True): + message = "Failed to create storage location for input image." + logger.error(message) + self._status_message_source.enqueue_status_message(severity=SeverityLabel.ERROR, message=message) + return False + # Also make sure that this file does not somehow already exist (highly unlikely) + if os.path.exists(metadata.filepath): + logger.error(f"Image {metadata.filepath} appears to already exist. This is never expected to occur.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Image appears to already exist. This is never expected to occur. 
" + "Please try again, and if this error continues to occur then please report a bug.") + return False + image_bytes: bytes + image_bytes = ImageUtils.image_to_bytes(image_data=image, image_format=ImageFormat.FORMAT_PNG) + try: + with (open(metadata.filepath, 'wb') as in_file): + in_file.write(image_bytes) + except IOError as e: + logger.error(f"Failed to save image to {metadata.filepath}, reason: {str(e)}.") + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Failed to save image - see calibration log for more details.") + return False + self._data_ledger.image_metadata_list.append(metadata) + self._save_data_ledger() + return True + + def _add_result( + self, + result: dict, + metadata: _ResultMetadata + ) -> None: + self._save_dict_to_filepath( + filepath=metadata.filepath, + json_dict=result, + ignore_none=True) + self._data_ledger.result_metadata_list.append(metadata) + self._save_data_ledger() + + def _delete_file_if_exists(self, filepath: str): try: os.remove(filepath) except FileNotFoundError as e: @@ -120,6 +190,25 @@ def _delete_if_exists(self, filepath: str): message=f"Failed to remove a file from the calibrator due to an unexpected reason. " f"See its internal log for details.") + def delete_staged(self) -> None: + image_indices_to_delete: list = list() + image_metadata: _ImageMetadata + for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.state == _ImageState.DELETE: + self._delete_file_if_exists(image_metadata.filepath) + image_indices_to_delete.append(image_index) + for i in reversed(image_indices_to_delete): + del self._data_ledger.image_metadata_list[i] + result_indices_to_delete: list = list() + result_metadata: _ResultMetadata + for result_index, result_metadata in enumerate(self._data_ledger.result_metadata_list): + if result_metadata.state == _ResultState.DELETE: + self._delete_file_if_exists(result_metadata.filepath) + result_indices_to_delete.append(result_index) + for i in reversed(result_indices_to_delete): + del self._data_ledger.result_metadata_list[i] + self._save_data_ledger() + def _exists_on_filesystem( self, path: str, @@ -135,6 +224,50 @@ def _exists_on_filesystem( message=msg), on_error_for_dev=logger.error) + def _get_result_metadata_by_identifier( + self, + identifier: str + ) -> _ResultMetadata: + match_count: int = 0 + matching_result_metadata: _ResultMetadata | None = None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.identifier == identifier: + match_count += 1 + matching_result_metadata = result_metadata + break + if match_count < 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {identifier} is not associated with any result.") + elif match_count > 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {identifier} is associated with multiple results.") + return matching_result_metadata + + def list_image_metadata(self) -> list[_ImageMetadata]: + return list(self._data_ledger.image_metadata_list) + + def list_result_metadata(self) -> list[_ResultMetadata]: + return list(self._data_ledger.result_metadata_list) + + def _load_data_ledger(self) -> bool: + """ + :return: True if loaded or if it can be created without overwriting existing data. False otherwise. 
+ """ + json_dict: dict + load_success: bool + json_dict, load_success = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) + if not load_success: + return False + try: + self._data_ledger = _DataLedger(**json_dict) + except ValidationError as e: + logger.error(e) + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message="Failed to parse data ledger from file.") + return False + return True + def _load_dict_from_filepath( self, filepath: str @@ -147,10 +280,10 @@ def _load_dict_from_filepath( if not os.path.exists(filepath): return dict(), True elif not os.path.isfile(filepath): - logger.critical(f"Calibration map file location {filepath} exists but is not a file.") + logger.critical(f"Json file location {filepath} exists but is not a file.") self._status_message_source.enqueue_status_message( severity=SeverityLabel.CRITICAL, - message="Filepath location for calibration map exists but is not a file. " + message="Filepath location for json exists but is not a file. " "Most likely a directory exists at that location, " "and it needs to be manually removed.") return dict(), False @@ -161,60 +294,142 @@ def _load_dict_from_filepath( message=msg), on_error_for_dev=logger.error) if not json_dict: - logger.error(f"Failed to load calibration map from file {filepath}.") + logger.error(f"Failed to load json from file {filepath}.") self._status_message_source.enqueue_status_message( severity=SeverityLabel.ERROR, - message="Failed to load calibration map from file.") + message="Failed to load json from file.") return dict(), False return json_dict, True + def load_image( + self, + identifier: str + ) -> str: # image in base64 + match_count: int = 0 + matching_metadata: _ImageMetadata | None = None + for image_metadata in self._data_ledger.image_metadata_list: + if image_metadata.identifier == identifier: + match_count += 1 + matching_metadata = image_metadata + break + if match_count < 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {identifier} is not associated with any image.") + elif match_count > 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {identifier} is associated with multiple images.") + + if not os.path.exists(matching_metadata.filepath): + raise MCTIntrinsicCalibrationError(message=f"File does not exist for image {identifier}.") + image_bytes: bytes + try: + with (open(matching_metadata.filepath, 'rb') as in_file): + image_bytes = in_file.read() + except OSError: + raise MCTIntrinsicCalibrationError(message=f"Failed to open image {identifier}.") + image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) + return image_base64 + + def _load_result( + self, + identifier: str, + result_type: type[BaseModel] + ) -> ...: + metadata: _ResultMetadata = self._get_result_metadata_by_identifier(identifier=identifier) + return self._load_result_by_metadata(metadata=metadata, result_type=result_type) + + def _load_result_by_metadata( + self, + metadata: _ResultMetadata, + result_type: type[BaseModel] + ) -> ...: + """ + Read the calibration result corresponding to the provided metadata. 
+ """ + json_dict: dict + load_success: bool + json_dict, load_success = self._load_dict_from_filepath(metadata.filepath) + if not load_success: + raise MCTIntrinsicCalibrationError(message=f"Failed to load result {metadata.identifier}.") + result: result_type = result_type(**json_dict) + return result + + def _save_data_ledger(self) -> None: + return self._save_dict_to_filepath( + filepath=self._data_ledger_filepath, + json_dict=self._data_ledger.model_dump()) + def _save_dict_to_filepath( self, filepath: str, - json_dict: dict + json_dict: dict, + ignore_none: bool = False ) -> None: + """ + :param filepath: Where to write the file + :param json_dict: What to write to the file + :param ignore_none: See IOUtils.json_write + """ IOUtils.json_write( filepath=filepath, json_dict=json_dict, on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( severity=SeverityLabel.ERROR, message=msg), - on_error_for_dev=logger.error) + on_error_for_dev=logger.error, + ignore_none=ignore_none) - def _save_image_to_filepath( + def update_image_metadata( self, - filepath: str, - image: numpy.ndarray - ) -> bool: - """ - Returns true if successful, False otherwise. - """ - # Before making any changes to the calibration map, make sure folders exist - if not self._exists_on_filesystem(path=os.path.dirname(filepath), pathtype="path", create_path=True): - message = "Failed to create storage location for input image." - logger.error(message) - self._status_message_source.enqueue_status_message(severity=SeverityLabel.ERROR, message=message) - return False - # Also make sure that this file does not somehow already exist (highly unlikely) - if os.path.exists(filepath): - logger.error(f"Image {filepath} appears to already exist. This is never expected to occur.") + image_identifier: str, + image_state: _ImageState, + image_label: str | None + ) -> None: + match_count: int = 0 + for image in self._data_ledger.image_metadata_list: + if image.identifier == image_identifier: + image.state = image_state + if image_label is not None: + image.image_label = image_label + match_count += 1 + break + if match_count < 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {image_identifier} is not associated with any image.") + elif match_count > 1: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Image appears to already exist. This is never expected to occur. " - "Please try again, and if this error continues to occur then please report a bug.") - return False - image_bytes: bytes - image_bytes = ImageUtils.image_to_bytes(image_data=image, image_format=IntrinsicCalibrator.IMAGE_FORMAT) - try: - with (open(filepath, 'wb') as in_file): - in_file.write(image_bytes) - except IOError as e: - logger.error(f"Failed to save image to {filepath}, reason: {str(e)}.") + severity=SeverityLabel.WARNING, + message=f"Identifier {image_identifier} is associated with multiple images. " + "This suggests that the data ledger is in an inconsistent state. 
" + "It may be prudent to either manually correct it, or recreate it.") + self._save_data_ledger() + + def update_result_metadata( + self, + identifier: str, + state: _ResultState, + result_label: str | None = None + ) -> None: + match_count: int = 0 + for result in self._data_ledger.result_metadata_list: + if result.identifier == identifier: + result.state = state + if result_label is not None: + result.result_label = result_label + match_count += 1 + break + + if match_count < 1: + raise MCTIntrinsicCalibrationError( + message=f"Identifier {identifier} is not associated with any result.") + elif match_count > 1: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to save image - see calibration log for more details.") - return False - return True + severity=SeverityLabel.WARNING, + message=f"Identifier {identifier} is associated with multiple results. " + "This suggests that the data ledger is in an inconsistent state. " + "It may be prudent to either manually correct it, or recreate it.") + + self._save_data_ledger() # ===================================================================================================================== @@ -222,101 +437,47 @@ def _save_image_to_filepath( # ===================================================================================================================== -class _IntrinsicDataMapValue(BaseModel): - image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) - result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) - - -class _IntrinsicDataMapEntry(BaseModel): - key: ImageResolution = Field() - value: _IntrinsicDataMapValue = Field() - - -class _IntrinsicDataMap(BaseModel): - entries: list[_IntrinsicDataMapEntry] = Field(default_factory=list) - - def as_dict(self) -> dict[ImageResolution, _IntrinsicDataMapValue]: - return_value: dict[ImageResolution, _IntrinsicDataMapValue] = dict() - for entry in self.entries: - if entry.key not in return_value: - return_value[entry.key] = _IntrinsicDataMapValue() - for image_metadata in entry.value.image_metadata_list: - return_value[entry.key].image_metadata_list.append(image_metadata) - for result_metadata in entry.value.result_metadata_list: - return_value[entry.key].result_metadata_list.append(result_metadata) - return return_value - - @staticmethod - def from_dict(in_dict: dict[ImageResolution, _IntrinsicDataMapValue]): - entries: list[_IntrinsicDataMapEntry] = list() - for key in in_dict.keys(): - entries.append(_IntrinsicDataMapEntry(key=key, value=in_dict[key])) - return _IntrinsicDataMap(entries=entries) +class IntrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + image_resolution: ImageResolution = Field() + calibrated_values: IntrinsicParameters = Field() + supplemental_data: dict = Field() -class IntrinsicCalibrator(Calibrator, abc.ABC): +class IntrinsicCalibrator(AbstractCalibrator, abc.ABC): Configuration: type[_Configuration] = _Configuration ImageState: type[_ImageState] = _ImageState ImageMetadata: type[_ImageMetadata] = _ImageMetadata ResultState: type[_ResultState] = _ResultState ResultMetadata: type[_ResultMetadata] = _ResultMetadata - DataMap: type[_IntrinsicDataMap] = _IntrinsicDataMap - - _calibration_map: dict[ImageResolution, _IntrinsicDataMapValue] - - _data_path: str - _data_ledger_filepath: str - CALIBRATION_MAP_FILENAME: Final[str] = "intrinsic_calibrations.json" - - IMAGE_FORMAT: Final[str] = ".png" # work in lossless image format - RESULT_FORMAT: Final[str] = ".json" def __init__( 
self, configuration: Configuration, status_message_source: StatusMessageSource ): - super().__init__(status_message_source=status_message_source) - - self._data_path = configuration.data_path - if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.CRITICAL, - message="Data path does not exist and could not be created.") - detailed_message: str = f"{self._data_path} does not exist and could not be created." - logger.critical(detailed_message) - raise RuntimeError(detailed_message) - - self._data_ledger_filepath = os.path.join(self._data_path, IntrinsicCalibrator.CALIBRATION_MAP_FILENAME) - if not self.load(): - message: str = "The calibration map could not be loaded or created. "\ - "In order to avoid data loss, the software will now abort. " \ - "Please manually correct or remove the file in the filesystem." - logger.critical(message) - self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) - raise RuntimeError(message) + super().__init__( + status_message_source=status_message_source, + data_path=configuration.data_path) def add_image( self, image_base64: str, - detector_label: str = "" + detector_label: str = "", ) -> str: # id of image - image_data: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") - map_key: ImageResolution = ImageResolution(x_px=image_data.shape[1], y_px=image_data.shape[0]) - image_identifier: str = str(uuid.uuid4()) - image_filepath = self._image_filepath(map_key=map_key, image_identifier=image_identifier) - self._save_image_to_filepath( - filepath=image_filepath, - image=image_data) - if map_key not in self._calibration_map: - self._calibration_map[map_key] = _IntrinsicDataMapValue() - self._calibration_map[map_key].image_metadata_list.append( - IntrinsicCalibrator.ImageMetadata( - identifier=image_identifier, - detector_label=detector_label, - image_resolution=map_key)) - self.save() - return image_identifier + image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") + identifier: str = str(uuid.uuid4()) + resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) + filepath = os.path.join(self._data_path, str(resolution), identifier + ImageFormat.FORMAT_PNG) + metadata: _ImageMetadata = IntrinsicCalibrator.ImageMetadata( + identifier=identifier, + filepath=filepath, + detector_label=detector_label, + resolution=resolution) + self._add_image( + image=image, + metadata=metadata) + return metadata.identifier def calculate( self, @@ -326,187 +487,79 @@ def calculate( :returns: a tuple containing a result identifier (GUID as string) and the IntrinsicCalibration structure """ - calibration_key: ImageResolution = image_resolution - if calibration_key not in self._calibration_map: - raise MCTIntrinsicCalibrationError( - message=f"No images for given resolution {str(image_resolution)} found.") - - result_identifier: str = str(uuid.uuid4()) - result_filepath = self._result_filepath( - map_key=calibration_key, - result_identifier=result_identifier) - - calibration_value: _IntrinsicDataMapValue = self._calibration_map[calibration_key] - # don't load images right away in case of memory constraints - image_identifiers: list[str] = list() - for image_metadata in calibration_value.image_metadata_list: + image_metadata_list: list[_ImageMetadata] = list() # image metadata available for calibration + for 
image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.resolution != image_resolution: + continue if image_metadata.state != _ImageState.SELECT: continue - image_filepath: str = self._image_filepath( - map_key=calibration_key, - image_identifier=image_metadata.identifier) - if not self._exists_on_filesystem(path=image_filepath, pathtype="filepath"): + if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, + severity=SeverityLabel.ERROR, message=f"Image {image_metadata.identifier} was not found. " - f"It will be omitted from the calibration.") + "This suggests that the data ledger is in an inconsistent state. " + "It will be omitted from the calibration.") continue - image_identifiers.append(image_metadata.identifier) + image_metadata_list.append(image_metadata) + + if len(image_metadata_list) == 0: + raise MCTIntrinsicCalibrationError(message=f"No images found for resolution {str(image_resolution)}.") - intrinsic_calibration, image_identifiers = self._calculate_implementation( + intrinsic_calibration, image_metadata_list = self._calculate_implementation( image_resolution=image_resolution, - image_identifiers=image_identifiers) + image_metadata_list=image_metadata_list) - IOUtils.json_write( - filepath=result_filepath, - json_dict=intrinsic_calibration.model_dump(), - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error, - ignore_none=True) + result_identifier: str = str(uuid.uuid4()) + result_filepath = \ + os.path.join(self._data_path, str(image_resolution), result_identifier + _RESULT_FORMAT) result_metadata: IntrinsicCalibrator.ResultMetadata = IntrinsicCalibrator.ResultMetadata( identifier=result_identifier, - image_identifiers=image_identifiers) - if len(self._calibration_map[calibration_key].result_metadata_list) == 0: - result_metadata.state = _ResultState.ACTIVE # No active result yet, so make this one active - self._calibration_map[calibration_key].result_metadata_list.append(result_metadata) - self.save() + filepath=result_filepath, + resolution=image_resolution, + image_identifiers=[image_metadata.identifier for image_metadata in image_metadata_list]) + self._add_result( + result=intrinsic_calibration.model_dump(), + metadata=result_metadata) + + # For now, assume that the user's intent is to set any new calibration to be the active one + self.update_result_metadata( + identifier=result_metadata.identifier, + state=_ResultState.ACTIVE) + return result_identifier, intrinsic_calibration @abc.abstractmethod def _calculate_implementation( self, image_resolution: ImageResolution, - image_identifiers: list[str] - ) -> tuple[IntrinsicCalibration, list[str]]: # image_identifiers that were actually used in calibration + image_metadata_list: list[ImageMetadata] + ) -> tuple[IntrinsicCalibration, list[ImageMetadata]]: # metadata of images that were actually used in calibration pass - def delete_staged(self) -> None: - for calibration_key in self._calibration_map.keys(): - calibration_value: _IntrinsicDataMapValue = self._calibration_map[calibration_key] - image_indices_to_delete: list = list() - for image_index, image in enumerate(calibration_value.image_metadata_list): - if image.state == _ImageState.DELETE: - self._delete_if_exists(self._image_filepath( - map_key=calibration_key, - image_identifier=image.identifier)) - 
image_indices_to_delete.append(image_index) - for i in reversed(image_indices_to_delete): - del calibration_value.image_metadata_list[i] - result_indices_to_delete: list = list() - for result_index, result in enumerate(calibration_value.result_metadata_list): - if result.state == _ResultState.DELETE: - self._delete_if_exists(self._result_filepath( - map_key=calibration_key, - result_identifier=result.identifier)) - result_indices_to_delete.append(result_index) - for i in reversed(result_indices_to_delete): - del calibration_value.result_metadata_list[i] - self.save() - - # noinspection DuplicatedCode - def get_image( - self, - image_identifier: str - ) -> str: # image in base64 - found_count: int = 0 - matching_image_resolution: ImageResolution | None = None - for image_resolution in self._calibration_map: - for image in self._calibration_map[image_resolution].image_metadata_list: - if image.identifier == image_identifier: - found_count += 1 - matching_image_resolution = image_resolution - break - if found_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Image identifier {image_identifier} is not associated with any image.") - elif found_count > 1: - raise MCTIntrinsicCalibrationError( - message=f"Image identifier {image_identifier} is associated with multiple images.") - - image_filepath = self._image_filepath( - map_key=matching_image_resolution, - image_identifier=image_identifier) - if not os.path.exists(image_filepath): - raise MCTIntrinsicCalibrationError( - message=f"File does not exist for image {image_identifier} " - f"and given resolution {str(matching_image_resolution)}.") - image_bytes: bytes - try: - with (open(image_filepath, 'rb') as in_file): - image_bytes = in_file.read() - except OSError: - raise MCTIntrinsicCalibrationError( - message=f"Failed to open image {image_identifier} for " - f"given resolution {str(matching_image_resolution)}.") - image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) - return image_base64 - - # noinspection DuplicatedCode def get_result( self, result_identifier: str ) -> IntrinsicCalibration: - found_count: int = 0 - matching_image_resolution: ImageResolution | None = None - for image_resolution in self._calibration_map: - for result in self._calibration_map[image_resolution].result_metadata_list: - if result.identifier == result_identifier: - found_count += 1 - matching_image_resolution = image_resolution - break - if found_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Image identifier {result_identifier} is not associated with any result.") - elif found_count > 1: - raise MCTIntrinsicCalibrationError( - message=f"Image identifier {result_identifier} is associated with multiple results.") - - return self._get_result_calibration_from_file( - image_resolution=matching_image_resolution, - result_identifier=result_identifier) + return self._load_result( + identifier=result_identifier, + result_type=IntrinsicCalibration) - def get_result_active( + def get_result_active_by_image_resolution( self, - image_resolution: ImageResolution - ) -> IntrinsicCalibration | None: - active_count: int = 0 + image_resolution: ImageResolution, + ) -> Optional[...]: + match_count: int = 0 matched_metadata: IntrinsicCalibrator.ResultMetadata | None = None - if image_resolution in self._calibration_map: - result_count: int = len(self._calibration_map[image_resolution].result_metadata_list) - if result_count > 0: - matched_metadata = self._calibration_map[image_resolution].result_metadata_list[0] - if matched_metadata.state == 
_ResultState.ACTIVE: - active_count = 1 - for result_index in range(1, result_count): - result_metadata = self._calibration_map[image_resolution].result_metadata_list[result_index] - if matched_metadata.state == _ResultState.DELETE: - matched_metadata = result_metadata - continue # basically we ignore any data staged for DELETE - elif matched_metadata.state == _ResultState.RETAIN: - if result_metadata.state == _ResultState.ACTIVE: - active_count += 1 - matched_metadata = result_metadata - continue # ACTIVE shall of course take priority - elif result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): - matched_metadata = result_metadata - else: # matched_result_metadata.state == CalibrationResultState.ACTIVE: - if result_metadata.state == _ResultState.ACTIVE: - # BOTH metadata are marked ACTIVE. This is not expected to occur. Indicates a problem. - active_count += 1 - if result_metadata.timestamp_utc() > matched_metadata.timestamp_utc(): - matched_metadata = result_metadata - if matched_metadata is None or \ - matched_metadata.state == _ResultState.DELETE: # no result that is not marked DELETE - return None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.state == _ResultState.ACTIVE and result_metadata.resolution == image_resolution: + matched_metadata = result_metadata - if active_count < 1: + if match_count < 1: self._status_message_source.enqueue_status_message( severity=SeverityLabel.WARNING, message=f"No result metadata is active for resolution {str(image_resolution)}." "Returning latest result.") - elif active_count > 1: + if match_count > 1: self._status_message_source.enqueue_status_message( severity=SeverityLabel.WARNING, message=f"Multiple result metadata are active for resolution {str(image_resolution)}. 
" @@ -514,176 +567,57 @@ def get_result_active( "To recover from this ambiguous state, it is strong recommended to explicitly set " "one of the results as \"active\", which will reset others to \"retain\".") - return self._get_result_calibration_from_file( - image_resolution=image_resolution, - result_identifier=matched_metadata.identifier) - - def _get_result_calibration_from_file( - self, - image_resolution: ImageResolution, - result_identifier: str - ) -> IntrinsicCalibration: - result_filepath = self._result_filepath( - map_key=image_resolution, - result_identifier=result_identifier) - if not os.path.exists(result_filepath): - raise MCTIntrinsicCalibrationError( - message=f"File does not exist for result {result_identifier} " - f"and given resolution {str(image_resolution)}.") - result_json_raw: str - try: - with (open(result_filepath, 'r') as in_file): - result_json_raw = in_file.read() - except OSError: - raise MCTIntrinsicCalibrationError( - message=f"Failed to open result {result_identifier} for " - f"given resolution {str(image_resolution)}.") - result_json_dict: dict - try: - result_json_dict = dict(json.loads(result_json_raw)) - except JSONDecodeError: - raise MCTIntrinsicCalibrationError( - message=f"Failed to parse result {result_identifier} for " - f"given resolution {str(image_resolution)}.") - intrinsic_calibration: IntrinsicCalibration = IntrinsicCalibration(**result_json_dict) - return intrinsic_calibration + if matched_metadata is None: + return None - def _image_filepath( - self, - map_key: ImageResolution, - image_identifier: str - ) -> str: - return os.path.join( - self._data_path, - str(map_key), - image_identifier + IntrinsicCalibrator.IMAGE_FORMAT) + return self._load_result_by_metadata( + metadata=matched_metadata, + result_type=IntrinsicCalibration) def list_resolutions(self) -> list[ImageResolution]: - resolutions: list[ImageResolution] = list(self._calibration_map.keys()) + resolutions_as_str: set[str] = {str(metadata.resolution) for metadata in self._data_ledger.image_metadata_list} + resolutions: list[ImageResolution] = [ImageResolution.from_str(resolution) for resolution in resolutions_as_str] return resolutions - # noinspection DuplicatedCode - def list_image_metadata( + def list_image_metadata_by_image_resolution( self, image_resolution: ImageResolution ) -> list[ImageMetadata]: image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] = list() - map_key: ImageResolution = image_resolution - if map_key in self._calibration_map: - image_metadata_list = self._calibration_map[map_key].image_metadata_list + for metadata in self._data_ledger.image_metadata_list: + if metadata.resolution == image_resolution: + image_metadata_list.append(metadata) return image_metadata_list - # noinspection DuplicatedCode - def list_result_metadata( + def list_result_metadata_by_image_resolution( self, image_resolution: ImageResolution ) -> list[ResultMetadata]: result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] = list() - map_key: ImageResolution = image_resolution - if map_key in self._calibration_map: - result_metadata_list = self._calibration_map[map_key].result_metadata_list + for metadata in self._data_ledger.result_metadata_list: + if metadata.resolution == image_resolution: + result_metadata_list.append(metadata) return result_metadata_list - def load(self) -> bool: - """ - :return: True if loaded or if it can be created without overwriting existing data. False otherwise. 
- """ - json_dict: dict - load_success: bool - json_dict, load_success = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) - if not load_success: - return False - calibration_map: IntrinsicCalibrator.DataMap - try: - calibration_map = IntrinsicCalibrator.DataMap(**json_dict) - except ValidationError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to parse calibration map from file.") - return False - self._calibration_map = calibration_map.as_dict() - return True - - def _result_filepath( - self, - map_key: ImageResolution, - result_identifier: str - ) -> str: - return os.path.join( - self._data_path, - str(map_key), - result_identifier + IntrinsicCalibrator.RESULT_FORMAT) - - def save(self) -> None: - return self._save_dict_to_filepath( - filepath=self._data_ledger_filepath, - json_dict=IntrinsicCalibrator.DataMap.from_dict(self._calibration_map).model_dump()) - - # noinspection DuplicatedCode - def update_image_metadata( - self, - image_identifier: str, - image_state: ImageState, - image_label: str | None - ) -> None: - found_count: int = 0 - for map_key in self._calibration_map: - for image in self._calibration_map[map_key].image_metadata_list: - if image.identifier == image_identifier: - image.state = image_state - if image_label is not None: - image.image_label = image_label - found_count += 1 - break - if found_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Image identifier {image_identifier} is not associated with any image.") - elif found_count > 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Image identifier {image_identifier} is associated with multiple images.") - self.save() - - # noinspection DuplicatedCode def update_result_metadata( self, - result_identifier: str, - result_state: ResultState, + identifier: str, + state: ResultState, result_label: str | None = None ) -> None: - found_count: int = 0 - matching_map_keys: set[ImageResolution] = set() # Normally this shall be of size exactly 1 - for map_key in self._calibration_map: - for result in self._calibration_map[map_key].result_metadata_list: - if result.identifier == result_identifier: - result.state = result_state - if result_label is not None: - result.result_label = result_label - found_count += 1 - matching_map_keys.add(map_key) - break + super().update_result_metadata( + identifier=identifier, + state=state, + result_label=result_label) # Some cleanup as applicable - if result_state == _ResultState.ACTIVE: - for map_key in matching_map_keys: - # If size greater than 1, something is wrong... but nonetheless - # we'll ensure there is only one active result per resolution - for result in self._calibration_map[map_key].result_metadata_list: - if result.identifier != result_identifier and result.state == _ResultState.ACTIVE: - result.state = _ResultState.RETAIN - - if found_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Result identifier {result_identifier} is not associated with any result.") - elif found_count > 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Result identifier {result_identifier} is associated with multiple results. " - "This suggests that the calibration map is in an inconsistent state. 
" - "It may be prudent to either manually correct it, or recreate it.") - - self.save() + if state == _ResultState.ACTIVE: + matching_metadata: _ResultMetadata = self._get_result_metadata_by_identifier(identifier=identifier) + for metadata in self._data_ledger.result_metadata_list: + if metadata.resolution == matching_metadata.resolution and \ + metadata.identifier != matching_metadata.identifier: + metadata.state = _ResultState.RETAIN # Only one ACTIVE per resolution + self._save_data_ledger() # ===================================================================================================================== @@ -691,59 +625,118 @@ def update_result_metadata( # ===================================================================================================================== -class _ExtrinsicDataListing(BaseModel): - image_metadata_list: list[_ImageMetadata] = Field(default_factory=list) - result_metadata_list: list[_ResultMetadata] = Field(default_factory=list) +class ExtrinsicCalibrationDetectorResult(BaseModel): + detector_label: str = Field() + detector_to_reference: Matrix4x4 = Field() -class ExtrinsicCalibrator(abc.ABC): +class ExtrinsicCalibration(BaseModel): + timestamp_utc: str = Field() + calibrated_values: list[ExtrinsicCalibrationDetectorResult] = Field() + supplemental_data: dict = Field() + + +class ExtrinsicCalibrator(AbstractCalibrator, abc.ABC): Configuration: type[_Configuration] = _Configuration ImageState: type[_ImageState] = _ImageState ImageMetadata: type[_ImageMetadata] = _ImageMetadata ResultState: type[_ResultState] = _ResultState ResultMetadata: type[_ResultMetadata] = _ResultMetadata - _image_filepaths: dict[tuple[str, str], str] # (detector_id, timestamp_iso8601) -> image_filepath - - DATA_FILENAME: Final[str] = "extrinsic_calibrations.json" - def __init__( self, configuration: Configuration, status_message_source: StatusMessageSource ): - self._configuration = configuration - self._status_message_source = status_message_source - if not self._exists_on_filesystem(path=self._configuration.data_path, pathtype="path", create_path=True): - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.CRITICAL, - message="Data path does not exist and could not be created.") - detailed_message: str = f"{self._configuration.data_path} does not exist and could not be created." - logger.critical(detailed_message) - raise RuntimeError(detailed_message) - if not self.load(): - message: str = "The calibration map could not be loaded or created. "\ - "In order to avoid data loss, the software will now abort. " \ - "Please manually correct or remove the file in the filesystem." 
- logger.critical(message) - self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) - raise RuntimeError(message) + super().__init__( + status_message_source=status_message_source, + data_path=configuration.data_path) + + def add_image( + self, + image_base64: str, + detector_label: str + ) -> str: # id of image + image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") + identifier: str = str(uuid.uuid4()) + resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) + filepath = os.path.join(self._data_path, str(resolution), identifier + ImageFormat.FORMAT_PNG) + metadata: _ImageMetadata = ExtrinsicCalibrator.ImageMetadata( + identifier=identifier, + filepath=filepath, + detector_label=detector_label, + resolution=resolution) + self._add_image( + image=image, + metadata=metadata) + return metadata.identifier + + def calculate( + self, + detector_labels: list[str], + detector_intrinsics: list[IntrinsicParameters] + ) -> tuple[str, ExtrinsicCalibration]: + """ + :returns: a tuple containing a result identifier (GUID as string) and the ExtrinsicCalibration structure + """ + + image_metadata_list: list[_ImageMetadata] = list() # image metadata available for calibration + for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): + if image_metadata.state != _ImageState.SELECT: + continue + if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): + self._status_message_source.enqueue_status_message( + severity=SeverityLabel.ERROR, + message=f"Image {image_metadata.identifier} was not found. " + "This suggests that the data ledger is in an inconsistent state. " + "It will be omitted from the calibration.") + continue + image_metadata_list.append(image_metadata) - # data: - # per detector: - # initial_frame transform to reference_target - # final transform to reference_target - # per frame: - # image - # (marker_id,2d_points)s - # final (frame_id,marker_id,3d_points)s - # - # input data: - # per detector: - # per frame: - # PNG: image - # - # output data: - # per detector: - # JSON: transform to reference_target - # JSON: Additional stats, inc. 
reference_target definition + if len(image_metadata_list) == 0: + raise MCTIntrinsicCalibrationError(message=f"No images found for calibration.") + + if detector_labels != list(set(detector_labels)): + raise MCTIntrinsicCalibrationError(message=f"Detector labels must not contain duplicated elements.") + + if len(detector_labels) != len(detector_intrinsics): + raise MCTIntrinsicCalibrationError(message=f"Expected detector labels and intrinsics to be of same size.") + + detector_intrinsics_by_label: dict[str, IntrinsicParameters] = dict(zip(detector_labels, detector_intrinsics)) + + extrinsic_calibration, image_metadata_list = self._calculate_implementation( + detector_intrinsics_by_label=detector_intrinsics_by_label, + image_metadata_list=image_metadata_list) + + result_identifier: str = str(uuid.uuid4()) + result_filepath = os.path.join(self._data_path, result_identifier + _RESULT_FORMAT) + result_metadata: ExtrinsicCalibration.ResultMetadata = ExtrinsicCalibration.ResultMetadata( + identifier=result_identifier, + filepath=result_filepath, + image_identifiers=[image_metadata.identifier for image_metadata in image_metadata_list]) + self._add_result( + result=extrinsic_calibration.model_dump(), + metadata=result_metadata) + + # For now, assume that the user's intent is to set any new calibration to be the active one + self.update_result_metadata( + identifier=result_metadata.identifier, + state=_ResultState.ACTIVE) + + return result_identifier, extrinsic_calibration + + @abc.abstractmethod + def _calculate_implementation( + self, + detector_intrinsics_by_label: dict[str, IntrinsicParameters], + image_metadata_list: list[ImageMetadata] + ): + pass + + def get_result( + self, + result_identifier: str + ) -> ExtrinsicCalibration: + return self._load_result( + identifier=result_identifier, + result_type=ExtrinsicCalibration) diff --git a/src/detector/detector.py b/src/detector/detector.py index c207b36..4199f12 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -146,7 +146,7 @@ def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | Error arg_type=CalibrationImageGetRequest) image_base64: str try: - image_base64 = self._calibrator.get_image(image_identifier=request.image_identifier) + image_base64 = self._calibrator.load_image(identifier=request.image_identifier) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return CalibrationImageGetResponse(image_base64=image_base64) @@ -158,7 +158,7 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL arg_type=CalibrationImageMetadataListRequest) image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] try: - image_metadata_list = self._calibrator.list_image_metadata( + image_metadata_list = self._calibrator.list_image_metadata_by_image_resolution( image_resolution=request.image_resolution) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) @@ -202,7 +202,7 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive intrinsic_calibration: IntrinsicCalibration | None try: image_resolution: ImageResolution = self._camera.get_resolution() - intrinsic_calibration = self._calibrator.get_result_active(image_resolution=image_resolution) + intrinsic_calibration = self._calibrator.get_result_active_by_image_resolution(image_resolution=image_resolution) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) return 
CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) @@ -214,7 +214,7 @@ def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadat arg_type=CalibrationResultMetadataListRequest) result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] try: - result_metadata_list = self._calibrator.list_result_metadata( + result_metadata_list = self._calibrator.list_result_metadata_by_image_resolution( image_resolution=request.image_resolution) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) @@ -227,8 +227,8 @@ def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorR arg_type=CalibrationResultMetadataUpdateRequest) try: self._calibrator.update_result_metadata( - result_identifier=request.result_identifier, - result_state=request.result_state, + identifier=request.result_identifier, + state=request.result_state, result_label=request.result_label) except MCTIntrinsicCalibrationError as e: return ErrorResponse(message=e.message) diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py new file mode 100644 index 0000000..3c3df9a --- /dev/null +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -0,0 +1,54 @@ +from .common_aruco_opencv import ArucoOpenCVCommon +from src.common import \ + ExtrinsicCalibration, \ + ExtrinsicCalibrator, \ + IntrinsicParameters +import cv2 +import cv2.aruco + + +class CharucoOpenCVIntrinsicCalibrator(ExtrinsicCalibrator): + def _calculate_implementation( + self, + detector_intrinsics_by_label: dict[str, IntrinsicParameters], + image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] + ) -> tuple[ExtrinsicCalibration, list[ExtrinsicCalibrator.ImageMetadata]]: + aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() + + charuco_spec = ArucoOpenCVCommon.CharucoBoard() + charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() + + raise NotImplementedError() + + # data: + # per detector: + # initial_frame transform to reference_target + # final transform to reference_target + # per frame: + # image + # (marker_id,2d_points)s + # final (frame_id,marker_id,3d_points)s + # + # input data: + # per detector: + # per frame: + # PNG: image + # + # output data: + # per detector: + # JSON: transform to reference_target + # JSON: Additional stats, inc. reference_target definition + + # Constraint: Reference board must be visible to all cameras for first frame_id (frame_0) + # - Estimate camera position relative to frame_0 + # MathUtils.estimate_matrix_transform_to_detector() + # - Convert points to rays for all (camera_id, frame_id) using frame_0 as basis + # MathUtils.convert_detector_corners_to_vectors() + # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. + # MathUtils.closest_intersection_between_n_lines() + # - Refine camera positions based on working_points via PnP + # MathUtils.estimate_matrix_transform_to_detector() + # Iterate max times or until convergence: + # - Convert points to rays for all (camera_id, frame_id), using working_points as basis + # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. 
+ # - Refine camera positions based on working_points via PnP diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py index 7b3325f..78cabac 100644 --- a/src/implementations/intrinsic_charuco_opencv.py +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -16,8 +16,8 @@ class CharucoOpenCVIntrinsicCalibrator(IntrinsicCalibrator): def _calculate_implementation( self, image_resolution: ImageResolution, - image_identifiers: list[str] - ) -> tuple[IntrinsicCalibration, list[str]]: # image_identifiers that were actually used in calibration + image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + ) -> tuple[IntrinsicCalibration, list[IntrinsicCalibrator.ImageMetadata]]: aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() # mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( @@ -32,12 +32,9 @@ def _calculate_implementation( all_charuco_corners = list() all_charuco_ids = list() - used_image_identifiers: list[str] = list() - for image_identifier in image_identifiers: - image_filepath: str = self._image_filepath( - map_key=image_resolution, - image_identifier=image_identifier) - image_rgb = cv2.imread(image_filepath) + used_image_metadata: list[IntrinsicCalibrator.ImageMetadata] = list() + for metadata in image_metadata_list: + image_rgb = cv2.imread(metadata.filepath) image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) (marker_corners, marker_ids, _) = cv2.aruco.detectMarkers( image=image_greyscale, @@ -46,10 +43,10 @@ def _calculate_implementation( if len(marker_corners) <= 0: self._status_message_source.enqueue_status_message( severity=SeverityLabel.WARNING, - message=f"Image {image_identifier} did not appear to contain any identifiable markers. " + message=f"Image {metadata.identifier} did not appear to contain any identifiable markers. " f"It will be omitted from the calibration.") continue - used_image_identifiers.append(image_identifier) + used_image_metadata.append(metadata) # Note: # Marker corners are the corners of the markers, whereas # ChArUco corners are the corners of the chessboard. @@ -91,7 +88,7 @@ def _calculate_implementation( "calibrated_stdevs": [value[0] for value in charuco_intrinsic_stdevs], # "marker_parameters": marker_parameters, "frame_results": [{ - "image_identifier": image_identifiers[i], + "image_identifier": used_image_metadata[i].identifier, "translation": [ charuco_translation_vectors[i][0, 0], charuco_translation_vectors[i][1, 0], @@ -134,4 +131,4 @@ def _calculate_implementation( float(charuco_distortion_coefficients[3, 0])]), supplemental_data=supplemental_data) - return intrinsic_calibration, used_image_identifiers + return intrinsic_calibration, used_image_metadata diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index c7368ed..6f6aa26 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -107,16 +107,3 @@ def test(self): message=message) print(message) - # Constraint: Reference board must be visible to all cameras for first frame_id (frame_0) - # - Estimate camera position relative to frame_0 - # MathUtils.estimate_matrix_transform_to_detector() - # - Convert points to rays for all (camera_id, frame_id) using frame_0 as basis - # MathUtils.convert_detector_corners_to_vectors() - # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. 
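The step named just above reduces each board corner to a least-squares intersection of one back-projected ray per camera. The MathUtils.closest_intersection_between_n_lines() helper it refers to is not shown in this series, so the snippet below is only a sketch of that geometric step under assumed inputs: the function name closest_point_to_rays and the (N, 3) array layout are illustrative, not part of the patch.

import numpy


def closest_point_to_rays(origins: numpy.ndarray, directions: numpy.ndarray) -> numpy.ndarray:
    """
    Least-squares intersection of N rays in 3D (assumes at least two
    non-parallel rays; otherwise the system below is singular).
    origins: (N, 3) ray source points; directions: (N, 3) ray directions.
    Returns the point minimizing the summed squared distance to all rays.
    """
    unit = directions / numpy.linalg.norm(directions, axis=1, keepdims=True)
    lhs = numpy.zeros((3, 3))
    rhs = numpy.zeros(3)
    for origin, direction in zip(origins, unit):
        # (I - d d^T) rejects the component along the ray direction,
        # leaving only the perpendicular offset from the ray.
        rejection = numpy.eye(3) - numpy.outer(direction, direction)
        lhs += rejection
        rhs += rejection @ origin
    return numpy.linalg.solve(lhs, rhs)


# Example: two rays that cross at (1, 1, 0).
# closest_point_to_rays(numpy.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]]),
#                       numpy.array([[1.0, 1.0, 0.0], [-1.0, 1.0, 0.0]]))

In the outline's terms, evaluating this once per (frame_id, point_id) over the rays contributed by all detectors would produce the working_points that the subsequent PnP refinement consumes.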
- # MathUtils.closest_intersection_between_n_lines() - # - Refine camera positions based on working_points via PnP - # MathUtils.estimate_matrix_transform_to_detector() - # Iterate max times or until convergence: - # - Convert points to rays for all (camera_id, frame_id), using working_points as basis - # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. - # - Refine camera positions based on working_points via PnP From a4f5ca375f4c49d63666b2c9cfe507dc61a73121 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Fri, 8 Aug 2025 19:07:24 -0400 Subject: [PATCH 14/33] WIP: Initial implementation of extrinsic calibration based on ChArUco --- src/common/__init__.py | 2 + src/common/calibration.py | 30 +- src/common/math.py | 24 +- .../specialized/calibration_image_table.py | 2 +- src/implementations/annotator_aruco_opencv.py | 35 +-- src/implementations/common_aruco_opencv.py | 67 +++- .../extrinsic_charuco_opencv.py | 290 +++++++++++++++--- src/pose_solver/pose_solver.py | 4 +- src/pose_solver/structures.py | 54 ++-- 9 files changed, 399 insertions(+), 109 deletions(-) diff --git a/src/common/__init__.py b/src/common/__init__.py index da3162f..f4fdb8d 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -20,6 +20,7 @@ ExtrinsicCalibrator, \ IntrinsicCalibration, \ IntrinsicCalibrator, \ + MCTExtrinsicCalibrationError, \ MCTIntrinsicCalibrationError from .camera import \ Camera, \ @@ -30,6 +31,7 @@ ImageResolution, \ ImageUtils from .math import \ + FeatureRay, \ IntrinsicParameters, \ IterativeClosestPointParameters, \ Landmark, \ diff --git a/src/common/calibration.py b/src/common/calibration.py index 43fb33c..c641416 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -33,6 +33,14 @@ def __init__(self, message: str, *args): self.message = message +class MCTExtrinsicCalibrationError(MCTError): + message: str + + def __init__(self, message: str, *args): + super().__init__(args) + self.message = message + + _RESULT_FORMAT: Final[str] = ".json" @@ -57,7 +65,8 @@ class _ImageMetadata(BaseModel): detector_label: str = Field() resolution: ImageResolution = Field() image_label: str = Field(default_factory=str) # human-readable label - timestamp_utc: str = Field(default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) + timestamp_utc_iso8601: str = Field( + default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: _ImageState = Field(default=_ImageState.SELECT) def is_selected(self): @@ -464,7 +473,10 @@ def add_image( self, image_base64: str, detector_label: str = "", + timestamp_utc_iso8601: str | None = None ) -> str: # id of image + if timestamp_utc_iso8601 is None: + timestamp_utc_iso8601 = datetime.datetime.now(tz=datetime.timezone.utc).isoformat() image: numpy.ndarray = ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") identifier: str = str(uuid.uuid4()) resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) @@ -473,7 +485,8 @@ def add_image( identifier=identifier, filepath=filepath, detector_label=detector_label, - resolution=resolution) + resolution=resolution, + timestamp_utc_iso8601=timestamp_utc_iso8601) self._add_image( image=image, metadata=metadata) @@ -655,7 +668,8 @@ def __init__( def add_image( self, image_base64: str, - detector_label: str + detector_label: str, + timestamp_utc_iso8601: str ) -> str: # id of image image: numpy.ndarray = 
ImageUtils.base64_to_image(input_base64=image_base64, color_mode="color") identifier: str = str(uuid.uuid4()) @@ -665,7 +679,8 @@ def add_image( identifier=identifier, filepath=filepath, detector_label=detector_label, - resolution=resolution) + resolution=resolution, + timestamp_utc_iso8601=timestamp_utc_iso8601) self._add_image( image=image, metadata=metadata) @@ -693,6 +708,13 @@ def calculate( continue image_metadata_list.append(image_metadata) + # This is a check to make sure that there are no duplicates over any (timestamp, detector_label) + identifiers: list[tuple[str, str]] = [ + (metadata.timestamp_utc_iso8601, metadata.detector_label) + for metadata in image_metadata_list] + if len(identifiers) != len(set(identifiers)): + raise MCTExtrinsicCalibrationError(message="Duplicates were detected over (timestamp, detector_label).") + if len(image_metadata_list) == 0: raise MCTIntrinsicCalibrationError(message=f"No images found for calibration.") diff --git a/src/common/math.py b/src/common/math.py index d6c2dd5..2bded75 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -218,6 +218,24 @@ def __init__( self.direction = direction +class FeatureRay(Ray): + """Same as Ray, but with an added feature_label str""" + feature_label: str + + def __init__( + self, + feature_label: str, + source_point: list[float], + direction: list[float], + epsilon: float = _DEFAULT_EPSILON + ): + super().__init__( + source_point=source_point, + direction=direction, + epsilon=epsilon) + self.feature_label = feature_label + + class Target(BaseModel): """ A trackable object. @@ -418,7 +436,7 @@ def closest_point_on_ray( @staticmethod def convert_detector_points_to_vectors( - points: list[list[float]], # [point_index][x/y/z] + points: list[list[float]], # [point_index][x/y] detector_intrinsics: IntrinsicParameters, detector_to_reference_matrix: Matrix4x4 ) -> list[list[float]]: @@ -499,13 +517,13 @@ def convex_quadrilateral_area( @staticmethod def estimate_matrix_transform_to_detector( annotations: list[Annotation], - target: Target, + landmarks: list[Landmark], detector_intrinsics: IntrinsicParameters ) -> Matrix4x4: target_points: list[list[float]] = list() # ordered points [point_index][x/y/z] detector_points: list[list[float]] = list() # ordered points [point_index][x/y] annotations_dict: dict[str, Annotation] = {annotation.feature_label: annotation for annotation in annotations} - for landmark in target.landmarks: + for landmark in landmarks: if landmark.feature_label in annotations_dict.keys(): annotation = annotations_dict[landmark.feature_label] target_points.append([landmark.x, landmark.y, landmark.z]) diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index f41dc00..b6f46ec 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -39,7 +39,7 @@ def _set_row_contents( self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, - s=str(row_content.timestamp_utc)) + s=str(row_content.timestamp_utc_iso8601)) self.table.SetCellValue( row=row_index, col=_COL_IDX_STATUS, diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py index 4d5ef19..672dc95 100644 --- a/src/implementations/annotator_aruco_opencv.py +++ b/src/implementations/annotator_aruco_opencv.py @@ -80,36 +80,9 @@ def update( message=message) self.set_status(Annotator.Status.FAILURE) return - image_greyscale = cv2.cvtColor(image, 
cv2.COLOR_RGB2GRAY) - (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( - image=image_greyscale, - dictionary=self._aruco_dictionary, - parameters=self._aruco_parameters) - - self._snapshots_identified = list() - # note: detected_indices is (inconsistently) None sometimes if nothing is detected - if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: - detected_count = detected_dictionary_indices.size - # Shape of some output was previously observed to (also) be inconsistent... make it consistent here: - detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_count, 4, 2)) - detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) - for detected_index, detected_id in enumerate(detected_dictionary_indices): - for corner_index in range(4): - detected_label: str = f"{detected_id}{Annotation.RELATION_CHARACTER}{corner_index}" - self._snapshots_identified.append(Annotation( - feature_label=detected_label, - x_px=float(detected_corner_points_px[detected_index][corner_index][0]), - y_px=float(detected_corner_points_px[detected_index][corner_index][1]))) - - self._snapshots_unidentified = list() - if rejected_corner_points_raw: - rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) - for rejected_index in range(rejected_corner_points_px.shape[0]): - for corner_index in range(4): - self._snapshots_unidentified.append(Annotation( - feature_label=Annotation.UNIDENTIFIED_LABEL, - x_px=float(rejected_corner_points_px[rejected_index][corner_index][0]), - y_px=float(rejected_corner_points_px[rejected_index][corner_index][1]))) - + self._snapshots_identified, self._snapshots_unidentified = ArucoOpenCVCommon.annotations_from_greyscale_image( + aruco_detector_parameters=self._aruco_parameters, + aruco_dictionary=self._aruco_dictionary, + image_greyscale=image_greyscale) self._update_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py index 47fa667..d63aa8c 100644 --- a/src/implementations/common_aruco_opencv.py +++ b/src/implementations/common_aruco_opencv.py @@ -1,4 +1,5 @@ from src.common import \ + Annotation, \ KeyValueMetaAny, \ KeyValueMetaBool, \ KeyValueMetaEnum, \ @@ -143,12 +144,34 @@ def __init__(self): self.marker_size_px = 400 self.px_per_mm = 40 - def aruco_dictionary(self) -> ...: # type cv2.aruco.Dictionary + def aruco_dictionary(self) -> cv2.aruco.Dictionary: if self.dictionary_name != "DICT_4X4_100": raise NotImplementedError("Only DICT_4X4_100 is currently implemented") aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) return aruco_dictionary + def as_target( + self, + target_label: str + ) -> Target: + """ + Note that the coordinates assume the same axes as get_marker_center_points, + but the origin is in the center of the board, not the bottom-left corner. 
+ """ + corner_points: list[list[float]] = self.get_marker_corner_points() + marker_count: int = len(corner_points) // 4 + landmarks: list[Landmark] = list() + for marker_index in range(0, marker_count): + for corner_index in range(0, 4): + landmarks.append(Landmark( + feature_label=f"{marker_index}{Landmark.RELATION_CHARACTER}{corner_index}", + x=corner_points[marker_index*4+corner_index][0], + y=corner_points[marker_index*4+corner_index][1], + z=corner_points[marker_index*4+corner_index][2])) + return Target( + label=target_label, + landmarks=landmarks) + def size_px(self) -> tuple[float, float]: board_size_x_px = self.square_count_x * self.square_size_px board_size_y_px = self.square_count_y * self.square_size_px @@ -216,6 +239,44 @@ def get_marker_ids(self) -> list[int]: num_markers = self.square_count_x * self.square_count_y // 2 return list(range(num_markers)) + @staticmethod + def annotations_from_greyscale_image( + aruco_detector_parameters: cv2.aruco.DetectorParameters, + aruco_dictionary: cv2.aruco.Dictionary, + image_greyscale: numpy.ndarray + ) -> tuple[list[Annotation], list[Annotation]]: + (detected_corner_points_raw, detected_dictionary_indices, rejected_corner_points_raw) = cv2.aruco.detectMarkers( + image=image_greyscale, + dictionary=aruco_dictionary, + parameters=aruco_detector_parameters) + + detected_annotations: list[Annotation] = list() + # note: detected_indices is (inconsistently) None sometimes if nothing is detected + if detected_dictionary_indices is not None and len(detected_dictionary_indices) > 0: + detected_count = detected_dictionary_indices.size + # Shape of some output was previously observed to (also) be inconsistent... make it consistent here: + detected_corner_points_px = numpy.array(detected_corner_points_raw).reshape((detected_count, 4, 2)) + detected_dictionary_indices = list(detected_dictionary_indices.reshape(detected_count)) + for detected_index, detected_id in enumerate(detected_dictionary_indices): + for corner_index in range(4): + detected_label: str = f"{detected_id}{Annotation.RELATION_CHARACTER}{corner_index}" + detected_annotations.append(Annotation( + feature_label=detected_label, + x_px=float(detected_corner_points_px[detected_index][corner_index][0]), + y_px=float(detected_corner_points_px[detected_index][corner_index][1]))) + + rejected_annotations: list[Annotation] = list() + if rejected_corner_points_raw: + rejected_corner_points_px = numpy.array(rejected_corner_points_raw).reshape((-1, 4, 2)) + for rejected_index in range(rejected_corner_points_px.shape[0]): + for corner_index in range(4): + rejected_annotations.append(Annotation( + feature_label=Annotation.UNIDENTIFIED_LABEL, + x_px=float(rejected_corner_points_px[rejected_index][corner_index][0]), + y_px=float(rejected_corner_points_px[rejected_index][corner_index][1]))) + + return detected_annotations, rejected_annotations + @staticmethod def assign_aruco_detection_parameters_to_key_value_list( detection_parameters: ... 
# cv2.aruco.DetectionParameters @@ -626,6 +687,10 @@ def target_from_marker_parameters( base_label : str, marker_size: float ) -> Target: + """ + :param base_label: Should correspond to the index of the ArUco marker in the dictionary + :param marker_size: + """ corner_points: list[list[float]] = MathUtils.square_marker_corner_points(marker_size=marker_size) landmarks: list[Landmark] = [ Landmark( diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 3c3df9a..92d6391 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -1,54 +1,264 @@ from .common_aruco_opencv import ArucoOpenCVCommon from src.common import \ + Annotation, \ ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ ExtrinsicCalibrator, \ - IntrinsicParameters + FeatureRay, \ + IntrinsicParameters, \ + Landmark, \ + MathUtils, \ + Matrix4x4, \ + Target import cv2 import cv2.aruco +import datetime +import numpy +from pydantic import BaseModel, Field +from typing import Final + + +_EPSILON: Final[float] = 0.0001 + + +class _ImageData(BaseModel): + """ + Helper structure - data stored for each image + """ + detector_label: str = Field() + annotations: list[Annotation] = Field() + rays: list[FeatureRay] = Field(default_factory=list) + + def annotations_as_points(self) -> list[list[float]]: + return [ + [annotation.x_px, annotation.y_px] + for annotation in self.annotations] + + +class _FeatureData(BaseModel): + """ + Helper structure - data stored for each feature + """ + feature_label: str = Field() + position: Landmark | None = Field(default=None) # None means it has not (or cannot) be calculated + + +class _TimestampData(BaseModel): + """ + Helper structure - data stored for each unique timestamp + """ + timestamp_utc_iso8601: str = Field() + images: list[_ImageData] = Field(default_factory=list) + features: list[_FeatureData] = Field(default_factory=list) + + +class _DetectorData(BaseModel): + """ + Helper structure - data stored for each detector + """ + detector_label: str = Field() + intrinsic_parameters: IntrinsicParameters = Field() + initial_to_reference: Matrix4x4 | None = Field(default=None) # Stored primarily for analyses + refined_to_reference: Matrix4x4 | None = Field(default=None) + + +class _CalibrationData(BaseModel): + """ + Helper structure - container for all things related to calibration + """ + timestamps: list[_TimestampData] = Field(default_factory=list) + detectors: list[_DetectorData] = Field(default_factory=list) + + def get_detector_container( + self, + detector_label: str + ) -> _DetectorData: + for detector in self.detectors: + if detector.detector_label == detector_label: + return detector + raise IndexError() + + def get_feature_container( + self, + timestamp_utc_iso8601: str, + feature_label: str + ) -> _FeatureData: + for timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + for feature in timestamp.features: + if feature.feature_label == feature_label: + return feature + break + raise IndexError() + + def get_image_container( + self, + timestamp_utc_iso8601: str, + detector_label: str + ) -> _ImageData: + for timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + for image in timestamp.images: + if image.detector_label == detector_label: + return image + break + raise IndexError() + + def get_timestamp_container( + self, + timestamp_utc_iso8601: str + ) -> _TimestampData: + for 
timestamp in self.timestamps: + if timestamp.timestamp_utc_iso8601 == timestamp_utc_iso8601: + return timestamp + raise IndexError() class CharucoOpenCVIntrinsicCalibrator(ExtrinsicCalibrator): + + @staticmethod + def _annotate_image( + aruco_detector_parameters: cv2.aruco.DetectorParameters, + aruco_dictionary: cv2.aruco.Dictionary, + image_metadata: ExtrinsicCalibrator.ImageMetadata + ) -> list[Annotation]: + image_rgb = cv2.imread(image_metadata.filepath) + image_greyscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) + annotations: list[Annotation] + annotations, _ = ArucoOpenCVCommon.annotations_from_greyscale_image( + aruco_detector_parameters=aruco_detector_parameters, + aruco_dictionary=aruco_dictionary, + image_greyscale=image_greyscale) + return annotations + def _calculate_implementation( self, detector_intrinsics_by_label: dict[str, IntrinsicParameters], image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] ) -> tuple[ExtrinsicCalibration, list[ExtrinsicCalibrator.ImageMetadata]]: - aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() - - charuco_spec = ArucoOpenCVCommon.CharucoBoard() - charuco_board: cv2.aruco.CharucoBoard = charuco_spec.create_board() - - raise NotImplementedError() - - # data: - # per detector: - # initial_frame transform to reference_target - # final transform to reference_target - # per frame: - # image - # (marker_id,2d_points)s - # final (frame_id,marker_id,3d_points)s - # - # input data: - # per detector: - # per frame: - # PNG: image - # - # output data: - # per detector: - # JSON: transform to reference_target - # JSON: Additional stats, inc. reference_target definition - - # Constraint: Reference board must be visible to all cameras for first frame_id (frame_0) - # - Estimate camera position relative to frame_0 - # MathUtils.estimate_matrix_transform_to_detector() - # - Convert points to rays for all (camera_id, frame_id) using frame_0 as basis - # MathUtils.convert_detector_corners_to_vectors() - # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. - # MathUtils.closest_intersection_between_n_lines() - # - Refine camera positions based on working_points via PnP - # MathUtils.estimate_matrix_transform_to_detector() - # Iterate max times or until convergence: - # - Convert points to rays for all (camera_id, frame_id), using working_points as basis - # - For each (frame_id, point_id), intersect N rays to get 3D points. All 3D Points = working_points. 
- # - Refine camera positions based on working_points via PnP + charuco_spec: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() + aruco_detector_parameters: cv2.aruco.DetectorParameters = cv2.aruco.DetectorParameters() + aruco_dictionary: cv2.aruco.Dictionary = charuco_spec.aruco_dictionary() + charuco_target: Target = charuco_spec.as_target(target_label="board") + + # Populate _CalibrationData structure, including detection of annotations + data: _CalibrationData = _CalibrationData() + for metadata in image_metadata_list: + annotations: list[Annotation] = self._annotate_image( + aruco_detector_parameters=aruco_detector_parameters, + aruco_dictionary=aruco_dictionary, + image_metadata=metadata) + image_data: _ImageData = _ImageData( + detector_label=metadata.detector_label, + annotations=annotations) + timestamp_data: _TimestampData + try: + timestamp_data = data.get_timestamp_container(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + except IndexError: + timestamp_data = _TimestampData(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + data.timestamps.append(timestamp_data) + timestamp_data.images.append(image_data) + try: + data.get_detector_container(detector_label=image_data.detector_label) + except IndexError: + detector: _DetectorData = _DetectorData( + detector_label=metadata.detector_label, + intrinsic_parameters=detector_intrinsics_by_label[metadata.detector_label]) + data.detectors.append(detector) + for annotation in annotations: + try: + data.get_feature_container( + timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, + feature_label=annotation.feature_label) + except IndexError: + feature_data: _FeatureData = _FeatureData(feature_label=annotation.feature_label) + timestamp_data = data.get_timestamp_container(timestamp_utc_iso8601=metadata.timestamp_utc_iso8601) + timestamp_data.features.append(feature_data) + + # Initial estimate of the pose of each detector relative to first frame + first_timestamp: _TimestampData = data.get_timestamp_container( + timestamp_utc_iso8601=min([metadata.timestamp_utc_iso8601 for metadata in image_metadata_list])) + for metadata in image_metadata_list: + if metadata.timestamp_utc_iso8601 == first_timestamp.timestamp_utc_iso8601: + image_data: _ImageData = data.get_image_container( + timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, + detector_label=metadata.detector_label) + intrinsic_parameters: IntrinsicParameters = detector_intrinsics_by_label[metadata.detector_label] + initial_to_reference: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + annotations=image_data.annotations, + landmarks=charuco_target.landmarks, + detector_intrinsics=intrinsic_parameters) + detector: _DetectorData = _DetectorData( + detector_label=metadata.detector_label, + intrinsic_parameters=intrinsic_parameters, + initial_to_reference=initial_to_reference, + refined_to_reference=initial_to_reference) + data.detectors.append(detector) + + max_iter: int = 500 + for i in range(0, max_iter): + # Update each ray based on the current pose + for timestamp_data in data.timestamps: + for image_data in timestamp_data.images: + detector_data: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) + feature_labels: list[str] = [annotation.feature_label for annotation in image_data.annotations] + ray_directions: list[list[float]] = MathUtils.convert_detector_points_to_vectors( + points=image_data.annotations_as_points(), + detector_intrinsics=detector_data.intrinsic_parameters, + 
detector_to_reference_matrix=detector_data.refined_to_reference) + source_point: list[float] = detector_data.refined_to_reference.get_translation() + annotation_count = len(image_data.annotations) + image_data.rays = [ + FeatureRay( + feature_label=feature_labels[annotation_index], + source_point=source_point, + direction=ray_directions[annotation_index]) + for annotation_index in range(0, annotation_count)] + # For each (timestamp, feature_label), intersect rays to get 3D positions in a common coordinate system + for timestamp_data in data.timestamps: + for feature_data in timestamp_data.features: + ray_list: list[FeatureRay] = list() + feature_label = feature_data.feature_label + for image_data in timestamp_data.images: + for ray in image_data.rays: + if ray.feature_label == feature_label: + ray_list.append(ray) + ray_intersection: MathUtils.RayIntersectionNOutput = MathUtils.closest_intersection_between_n_lines( + rays=ray_list, + maximum_distance=_EPSILON) + if ray_intersection.intersection_count() > 0: + position: numpy.ndarray = ray_intersection.centroid() + feature_data.position = Landmark( + feature_label=feature_label, + x=float(position[0]), + y=float(position[1]), + z=float(position[2])) + else: + feature_data.position = None + # Use the newly-calculated 3D points together with the annotations to update the pose (PnP) + for detector_data in data.detectors: + landmarks: list[Landmark] = list() + annotations: list[Annotation] = list() + for timestamp_data in data.timestamps: + for feature_data in timestamp_data.features: + for image_data in timestamp_data.images: + for annotation in image_data.annotations: + if annotation.feature_label == feature_data.feature_label: + landmarks.append(feature_data.position) + annotations.append(annotation) + refined_to_reference = MathUtils.estimate_matrix_transform_to_detector( + annotations=annotations, + landmarks=landmarks, + detector_intrinsics=detector_data.intrinsic_parameters) + detector_data.refined_to_reference = refined_to_reference + # TODO: Termination criteria, check convergence on angle AND distance + + extrinsic_calibration: ExtrinsicCalibration = ExtrinsicCalibration( + timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), + calibrated_values=[ + ExtrinsicCalibrationDetectorResult( + detector_label=detector_data.detector_label, + detector_to_reference=detector_data.refined_to_reference) + for detector_data in data.detectors], + supplemental_data=data.model_dump()) + return extrinsic_calibration, image_metadata_list diff --git a/src/pose_solver/pose_solver.py b/src/pose_solver/pose_solver.py index 8871a74..310b8c1 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/pose_solver/pose_solver.py @@ -346,7 +346,7 @@ def update(self) -> None: intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] reference_to_detector: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=annotation_list_by_detector_label[detector_label], - target=reference_target, + landmarks=reference_target.landmarks, detector_intrinsics=intrinsics) detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( numpy.linalg.inv(reference_to_detector.as_numpy_array())) @@ -431,7 +431,7 @@ def update(self) -> None: intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] detected_to_detector_matrix4x4: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=annotation_list_by_detector_label[detector_label], - target=target, + landmarks=target.landmarks, 
detector_intrinsics=intrinsics) detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py index b1bd67c..c4e1f1f 100644 --- a/src/pose_solver/structures.py +++ b/src/pose_solver/structures.py @@ -80,34 +80,34 @@ class PoseSolverConfiguration(BaseModel): class PoseSolverParameters(BaseModel): minimum_detector_count: int = Field(default=2) - MAXIMUM_RAY_COUNT_FOR_INTERSECTION: int = Field(2) - POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: float = Field(0.1) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MINIMUM_SURFACE_NORMAL_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS: float = Field(1.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MAXIMUM_ORDER: int = Field(0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_DISTANCE: float = Field(15.0, description="millimeters") - POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS: float = Field(0.8) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_ANGLE_DEGREES: float = Field(15.0) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_DISTANCE: float = Field(15.0) - POSE_SINGLE_CAMERA_REPROJECTION_ERROR_FACTOR_BETA_OVER_ALPHA: float = Field(1.0) - POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS: float = Field(0.4) + MAXIMUM_RAY_COUNT_FOR_INTERSECTION: int = Field(default=2) + POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: float = Field(default=0.1) + POSE_SINGLE_CAMERA_EXTRAPOLATION_MINIMUM_SURFACE_NORMAL_ANGLE_DEGREES: float = Field(default=15.0) + POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS: float = Field(default=1.0) + POSE_SINGLE_CAMERA_EXTRAPOLATION_MAXIMUM_ORDER: int = Field(default=0) + POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_ANGLE_DEGREES: float = Field(default=15.0) + POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_DISTANCE: float = Field(default=15.0, description="millimeters") + POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS: float = Field(default=0.8) + POSE_SINGLE_CAMERA_NEAREST_LIMIT_ANGLE_DEGREES: float = Field(default=15.0) + POSE_SINGLE_CAMERA_NEAREST_LIMIT_DISTANCE: float = Field(default=15.0) + POSE_SINGLE_CAMERA_REPROJECTION_ERROR_FACTOR_BETA_OVER_ALPHA: float = Field(default=1.0) + POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS: float = Field(default=0.4) # TODO: Is this next one detector-specific? 
- POSE_SINGLE_CAMERA_DEPTH_CORRECTION: float = Field(-7.5, description="millimeters, observed tendency to overestimate depth.") - POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS: float = Field(1.0) - INTERSECTION_MAXIMUM_DISTANCE: float = Field(10.0, description="millimeters") - icp_termination_iteration_count: int = Field(50) - icp_termination_translation: float = Field(0.005, description="millimeters") - icp_termination_rotation_radians: float = Field(0.0005) - icp_termination_mean_point_distance: float = Field(0.1, description="millimeters") - icp_termination_rms_point_distance: float = Field(0.1, description="millimeters") - DENOISE_OUTLIER_DISTANCE_MILLIMETERS: float = Field(10.0) - DENOISE_OUTLIER_ANGLE_DEGREES: float = Field(5.0) - DENOISE_STORAGE_SIZE: int = Field(10) - DENOISE_FILTER_SIZE: int = Field(7) - DENOISE_REQUIRED_STARTING_STREAK: int = Field(3) - ARUCO_MARKER_DICTIONARY_ENUM: int = Field(cv2.aruco.DICT_4X4_100) - ARUCO_POSE_ESTIMATOR_METHOD: int = Field(cv2.SOLVEPNP_ITERATIVE) + POSE_SINGLE_CAMERA_DEPTH_CORRECTION: float = Field(default=-7.5, description="millimeters, observed tendency to overestimate depth.") + POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS: float = Field(default=1.0) + INTERSECTION_MAXIMUM_DISTANCE: float = Field(default=10.0, description="millimeters") + icp_termination_iteration_count: int = Field(default=50) + icp_termination_translation: float = Field(default=0.005, description="millimeters") + icp_termination_rotation_radians: float = Field(default=0.0005) + icp_termination_mean_point_distance: float = Field(default=0.1, description="millimeters") + icp_termination_rms_point_distance: float = Field(default=0.1, description="millimeters") + DENOISE_OUTLIER_DISTANCE_MILLIMETERS: float = Field(default=10.0) + DENOISE_OUTLIER_ANGLE_DEGREES: float = Field(default=5.0) + DENOISE_STORAGE_SIZE: int = Field(default=10) + DENOISE_FILTER_SIZE: int = Field(default=7) + DENOISE_REQUIRED_STARTING_STREAK: int = Field(default=3) + ARUCO_MARKER_DICTIONARY_ENUM: int = Field(default=cv2.aruco.DICT_4X4_100) + ARUCO_POSE_ESTIMATOR_METHOD: int = Field(default=cv2.SOLVEPNP_ITERATIVE) # SOLVEPNP_ITERATIVE works okay but is susceptible to optical illusions (flipping) # SOLVEPNP_P3P appears to return nan's on rare occasion # SOLVEPNP_SQPNP appears to return nan's on rare occasion From 277721821a7ab22c5c3704487da5044db5cec221 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Mon, 11 Aug 2025 16:22:14 -0400 Subject: [PATCH 15/33] WIP Update automated test to use extrinsic calibration (Not yet working) --- src/common/calibration.py | 19 ++--- src/common/math.py | 23 ++--- .../extrinsic_charuco_opencv.py | 45 +++++++--- test/images/simulated/ideal/about.txt | 10 +-- test/test_extrinsic_calibration.py | 83 +++++++++++-------- 5 files changed, 103 insertions(+), 77 deletions(-) diff --git a/src/common/calibration.py b/src/common/calibration.py index c641416..f4b22ac 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -688,13 +688,18 @@ def add_image( def calculate( self, - detector_labels: list[str], - detector_intrinsics: list[IntrinsicParameters] + detector_intrinsics_by_label: dict[str, IntrinsicParameters] ) -> tuple[str, ExtrinsicCalibration]: """ :returns: a tuple containing a result identifier (GUID as string) and the ExtrinsicCalibration structure """ + # if detector_labels != list(set(detector_labels)): + # raise MCTIntrinsicCalibrationError(message=f"Detector labels must not contain duplicated elements.") + # if len(detector_labels) != 
len(detector_intrinsics): + # raise MCTIntrinsicCalibrationError(message=f"Expected detector labels and intrinsics to be of same size.") + # detector_intrinsics_by_label: dict[str, IntrinsicParameters] = dict(zip(detector_labels, detector_intrinsics)) + image_metadata_list: list[_ImageMetadata] = list() # image metadata available for calibration for image_index, image_metadata in enumerate(self._data_ledger.image_metadata_list): if image_metadata.state != _ImageState.SELECT: @@ -718,21 +723,13 @@ def calculate( if len(image_metadata_list) == 0: raise MCTIntrinsicCalibrationError(message=f"No images found for calibration.") - if detector_labels != list(set(detector_labels)): - raise MCTIntrinsicCalibrationError(message=f"Detector labels must not contain duplicated elements.") - - if len(detector_labels) != len(detector_intrinsics): - raise MCTIntrinsicCalibrationError(message=f"Expected detector labels and intrinsics to be of same size.") - - detector_intrinsics_by_label: dict[str, IntrinsicParameters] = dict(zip(detector_labels, detector_intrinsics)) - extrinsic_calibration, image_metadata_list = self._calculate_implementation( detector_intrinsics_by_label=detector_intrinsics_by_label, image_metadata_list=image_metadata_list) result_identifier: str = str(uuid.uuid4()) result_filepath = os.path.join(self._data_path, result_identifier + _RESULT_FORMAT) - result_metadata: ExtrinsicCalibration.ResultMetadata = ExtrinsicCalibration.ResultMetadata( + result_metadata: ExtrinsicCalibrator.ResultMetadata = ExtrinsicCalibrator.ResultMetadata( identifier=result_identifier, filepath=result_filepath, image_identifiers=[image_metadata.identifier for image_metadata in image_metadata_list]) diff --git a/src/common/math.py b/src/common/math.py index 2bded75..5da8b63 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -151,6 +151,10 @@ def __mul__(self, other) -> 'Matrix4x4': result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) return Matrix4x4(values=list(result_numpy_array.flatten())) + def get_rotation_as_quaternion(self) -> list[float]: + # noinspection PyArgumentList + return Rotation.from_matrix(self.as_numpy_array()[0:3, 0:3]).as_quat().tolist() + def get_translation(self) -> list[float]: """ Return a vector of [x,y,z] representing translation. @@ -218,23 +222,12 @@ def __init__( self.direction = direction -class FeatureRay(Ray): +class FeatureRay(BaseModel, Ray): """Same as Ray, but with an added feature_label str""" + source_point: list[float] + direction: list[float] feature_label: str - def __init__( - self, - feature_label: str, - source_point: list[float], - direction: list[float], - epsilon: float = _DEFAULT_EPSILON - ): - super().__init__( - source_point=source_point, - direction=direction, - epsilon=epsilon) - self.feature_label = feature_label - class Target(BaseModel): """ @@ -358,7 +351,7 @@ class RayIntersectionNOutput: centroids: numpy.ndarray # How many rays were used. - # Note that centroids might not use all possible intersections (e.g. parallel rays) + # Note that centroids might not use all possible intersections (e.g. 
parallel rays, exceeded maximum distance) ray_count: int def __init__( diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 92d6391..d500a8e 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -15,10 +15,16 @@ import datetime import numpy from pydantic import BaseModel, Field +from scipy.spatial.transform import Rotation +import sys from typing import Final _EPSILON: Final[float] = 0.0001 +_MAX_FLOAT: Final[float] = sys.float_info.max +_TERMINATION_ITERATION_COUNT: Final[int] = 500 +_TERMINATION_ROTATION_CHANGE_DEGREES: Final[float] = 0.05 +_TERMINATION_TRANSLATION_CHANGE: Final[float] = 0.05 class _ImageData(BaseModel): @@ -114,7 +120,7 @@ def get_timestamp_container( raise IndexError() -class CharucoOpenCVIntrinsicCalibrator(ExtrinsicCalibrator): +class CharucoOpenCVExtrinsicCalibrator(ExtrinsicCalibrator): @staticmethod def _annotate_image( @@ -188,18 +194,16 @@ def _calculate_implementation( annotations=image_data.annotations, landmarks=charuco_target.landmarks, detector_intrinsics=intrinsic_parameters) - detector: _DetectorData = _DetectorData( - detector_label=metadata.detector_label, - intrinsic_parameters=intrinsic_parameters, - initial_to_reference=initial_to_reference, - refined_to_reference=initial_to_reference) - data.detectors.append(detector) + detector: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) + detector.initial_to_reference = initial_to_reference + detector.refined_to_reference = initial_to_reference - max_iter: int = 500 - for i in range(0, max_iter): + for i in range(0, _TERMINATION_ITERATION_COUNT): # Update each ray based on the current pose for timestamp_data in data.timestamps: for image_data in timestamp_data.images: + if len(image_data.annotations) == 0: + continue detector_data: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) feature_labels: list[str] = [annotation.feature_label for annotation in image_data.annotations] ray_directions: list[list[float]] = MathUtils.convert_detector_points_to_vectors( @@ -225,7 +229,7 @@ def _calculate_implementation( ray_list.append(ray) ray_intersection: MathUtils.RayIntersectionNOutput = MathUtils.closest_intersection_between_n_lines( rays=ray_list, - maximum_distance=_EPSILON) + maximum_distance=_MAX_FLOAT) if ray_intersection.intersection_count() > 0: position: numpy.ndarray = ray_intersection.centroid() feature_data.position = Landmark( @@ -236,6 +240,7 @@ def _calculate_implementation( else: feature_data.position = None # Use the newly-calculated 3D points together with the annotations to update the pose (PnP) + converged: bool = True # until shown otherwise for detector_data in data.detectors: landmarks: list[Landmark] = list() annotations: list[Annotation] = list() @@ -243,15 +248,29 @@ def _calculate_implementation( for feature_data in timestamp_data.features: for image_data in timestamp_data.images: for annotation in image_data.annotations: - if annotation.feature_label == feature_data.feature_label: + if annotation.feature_label == feature_data.feature_label and \ + feature_data.position is not None: landmarks.append(feature_data.position) annotations.append(annotation) - refined_to_reference = MathUtils.estimate_matrix_transform_to_detector( + refined_to_reference: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=annotations, landmarks=landmarks, 
detector_intrinsics=detector_data.intrinsic_parameters) + translation_change: float = numpy.linalg.norm( + numpy.asarray(refined_to_reference.get_translation()) - + numpy.asarray(detector_data.refined_to_reference.get_translation())) + old_to_reference: numpy.ndarray = detector_data.refined_to_reference.as_numpy_array() + reference_to_refined: numpy.ndarray = refined_to_reference.inverse().as_numpy_array() + old_to_refined: numpy.ndarray = numpy.matmul(reference_to_refined, old_to_reference) + # noinspection PyArgumentList + rotation_change_degrees: float = \ + numpy.linalg.norm(Rotation.from_matrix(old_to_refined[0:3, 0:3]).as_rotvec(degrees=True)) detector_data.refined_to_reference = refined_to_reference - # TODO: Termination criteria, check convergence on angle AND distance + if rotation_change_degrees > _TERMINATION_ROTATION_CHANGE_DEGREES or \ + translation_change > _TERMINATION_TRANSLATION_CHANGE: + converged = False + if converged: + break extrinsic_calibration: ExtrinsicCalibration = ExtrinsicCalibration( timestamp_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), diff --git a/test/images/simulated/ideal/about.txt b/test/images/simulated/ideal/about.txt index 654ddce..6e2d46a 100644 --- a/test/images/simulated/ideal/about.txt +++ b/test/images/simulated/ideal/about.txt @@ -4,10 +4,10 @@ C_F.png Each camera is 1 meter away from a central pivot point at the origin. Each camera has its y axis aligned with the world's up direction (+z-axis in Blender). -Cameras 01 through 05 are all oriented 30 degrees relative to the table surface, each rotated 45 degrees around the world origin. -Cameras 06 through 10 are all oriented 45 degrees relative to the table surface, each rotated 45 degrees around the world origin +Cameras 01 through 05 are all oriented 30 degrees relative to the table surface, each increasingly rotated 45 degrees around the world origin. +Cameras 06 through 10 are all oriented 45 degrees relative to the table surface, each increasingly rotated 45 degrees around the world origin All cameras use the same viewing angle and image resolutions. -Frames from the A and B sets will have the ChArUco board visible. -Frames from the C and D sets will have the ChArUco board at least partially visible to most cameras but not all. -Frames from the E set are designed to be visible only to specific cameras. +Frames from the A and B sets will have the ChArUco board visible to all cameras. +Frames from the C and D sets will have the ChArUco board visible to most cameras but not all. +Frames from the E set are designed to be visible only to specific limited sets of cameras. 
diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 6f6aa26..f190b38 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -1,8 +1,9 @@ from src.common import \ - Annotation, \ - Annotator, \ + ExtrinsicCalibration, \ ImageResolution, \ ImageUtils, \ + IntrinsicParameters, \ + IntrinsicCalibration, \ IntrinsicCalibrator, \ KeyValueSimpleAny, \ KeyValueSimpleString, \ @@ -10,11 +11,12 @@ StatusMessageSource from src.implementations.common_aruco_opencv import \ ArucoOpenCVCommon -from src.implementations.annotator_aruco_opencv import \ - ArucoOpenCVAnnotator +from src.implementations.extrinsic_charuco_opencv import \ + CharucoOpenCVExtrinsicCalibrator from src.implementations.intrinsic_charuco_opencv import \ CharucoOpenCVIntrinsicCalibrator import cv2 +import datetime import numpy import os import re @@ -42,7 +44,11 @@ def test(self): # Organize ourselves with respect to the input data image_location: str = os.path.join("images", "simulated", "ideal") image_contents: list[str] = os.listdir(image_location) - image_filepaths: dict[str, dict[str, str]] = dict() # Access as: images[CameraID][FrameID] + image_filepaths_by_camera_frame: dict[str, dict[str, str]] = dict() # Access as: x[CameraID][FrameID] + image_filepaths_by_frame_camera: dict[str, dict[str, str]] = dict() # Access as: x[FrameID][CameraID] + timestamps_iso8601_by_frame: dict[str, str] = dict() # Access as: x[FrameID] + reference_time: datetime.datetime = datetime.datetime.now(tz=datetime.timezone.utc) + image_count: int = 0 for image_content in image_contents: if image_content == "about.txt": continue @@ -59,10 +65,17 @@ def test(self): camera_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_CAMERA) frame_id: str = match.group(IMAGE_CONTENT_MATCH_INDEX_FRAME) - if camera_id not in image_filepaths: - image_filepaths[camera_id] = dict() - image_filepaths[camera_id][frame_id] = image_filepath - image_count: int = sum(len(image_filepaths[camera_id]) for camera_id in image_filepaths.keys()) + if camera_id not in image_filepaths_by_camera_frame: + image_filepaths_by_camera_frame[camera_id] = dict() + image_filepaths_by_camera_frame[camera_id][frame_id] = image_filepath + if frame_id not in image_filepaths_by_frame_camera: + image_filepaths_by_frame_camera[frame_id] = dict() + timestamps_iso8601_by_frame[frame_id] = ( + reference_time + - datetime.timedelta(hours=1) + + datetime.timedelta(seconds=image_count)).isoformat() + image_filepaths_by_frame_camera[frame_id][camera_id] = image_filepath + image_count += 1 message = f"Found {image_count} image files." status_message_source.enqueue_status_message( severity=SeverityLabel.INFO, @@ -72,38 +85,42 @@ def test(self): # To simplify our lives and ensure a reasonable result, # we'll calibrate all cameras with the same set of input images. # We'll use all images from the A# and B# sets of frames. 
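        # (Per about.txt, the A and B frame sets are the only ones in which the ChArUco
        # board is visible to all cameras, so restricting to them gives every camera a
        # comparable set of intrinsic calibration inputs.)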
- calibration_result: CharucoOpenCVIntrinsicCalibrator | None + intrinsic_parameters: IntrinsicParameters with TemporaryDirectory() as temppath: - calibrator: CharucoOpenCVIntrinsicCalibrator = CharucoOpenCVIntrinsicCalibrator( + intrinsic_calibrator: CharucoOpenCVIntrinsicCalibrator = CharucoOpenCVIntrinsicCalibrator( configuration=IntrinsicCalibrator.Configuration(data_path=temppath), status_message_source=status_message_source) - for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): + for camera_id, image_filepaths_by_frame_id in image_filepaths_by_camera_frame.items(): for frame_id, image_filepath in image_filepaths_by_frame_id.items(): if not frame_id.startswith("A") and not frame_id.startswith("B"): continue image: numpy.ndarray = cv2.imread(image_filepath) image_base64: str = ImageUtils.image_to_base64(image) - calibrator.add_image(image_base64) - _, calibration_result = calibrator.calculate(image_resolution=IMAGE_RESOLUTION) + intrinsic_calibrator.add_image(image_base64) + intrinsics_calibration: IntrinsicCalibration + _, intrinsics_calibration = intrinsic_calibrator.calculate(image_resolution=IMAGE_RESOLUTION) + intrinsic_parameters = intrinsics_calibration.calibrated_values - marker: ArucoOpenCVAnnotator = ArucoOpenCVAnnotator( - configuration=Annotator.Configuration(method="aruco_opencv"), - status_message_source=status_message_source) - marker.set_parameters(parameters=MARKER_DETECTION_PARAMETERS) - image_marker_snapshots: dict[str, dict[str, list[Annotation]]] = dict() - detection_count: int = 0 - for camera_id, image_filepaths_by_frame_id in image_filepaths.items(): - for frame_id, image_filepath in image_filepaths_by_frame_id.items(): - if camera_id not in image_marker_snapshots: - image_marker_snapshots[camera_id] = dict() - image: numpy.ndarray = cv2.imread(image_filepath) - marker.update(image) - marker_snapshots: list[Annotation] = marker.get_markers_detected() - image_marker_snapshots[camera_id][frame_id] = marker_snapshots - detection_count += len(marker_snapshots) - message = f"{detection_count} detections." - status_message_source.enqueue_status_message( - severity=SeverityLabel.INFO, - message=message) + intrinsics_by_camera: dict[str, IntrinsicParameters] = dict() # Access as x[camera_id] + for camera_id in image_filepaths_by_camera_frame.keys(): + intrinsics_by_camera[camera_id] = intrinsic_parameters + + extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator + extrinsic_calibration: ExtrinsicCalibration + with TemporaryDirectory() as temppath: + extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator = CharucoOpenCVExtrinsicCalibrator( + configuration=IntrinsicCalibrator.Configuration(data_path=temppath), + status_message_source=status_message_source) + for frame_id, image_filepaths_by_camera_id in image_filepaths_by_frame_camera.items(): + for camera_id, image_filepath in image_filepaths_by_camera_id.items(): + image: numpy.ndarray = cv2.imread(image_filepath) + image_base64: str = ImageUtils.image_to_base64(image) + extrinsic_calibrator.add_image( + image_base64=image_base64, + detector_label=camera_id, + timestamp_utc_iso8601=timestamps_iso8601_by_frame[frame_id]) + _, extrinsic_calibration = extrinsic_calibrator.calculate(detector_intrinsics_by_label=intrinsics_by_camera) + + message = f"{len(extrinsic_calibration.calibrated_values)} calibrations." 
print(message) From 31f658b9fb6a10b280edbc0ccfe920140e197549 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 12 Aug 2025 15:11:26 -0400 Subject: [PATCH 16/33] WIP: A few bugfixes, update testing data, calibration now converges --- src/common/math.py | 4 +- .../extrinsic_charuco_opencv.py | 38 +++++++++++++---- test/images/simulated/ideal/C01_FA0.png | 4 +- test/images/simulated/ideal/C01_FB1.png | 4 +- test/images/simulated/ideal/C01_FB2.png | 4 +- test/images/simulated/ideal/C01_FB3.png | 4 +- test/images/simulated/ideal/C01_FB4.png | 4 +- test/images/simulated/ideal/C01_FC1.png | 4 +- test/images/simulated/ideal/C01_FC2.png | 4 +- test/images/simulated/ideal/C01_FC3.png | 4 +- test/images/simulated/ideal/C01_FC4.png | 4 +- test/images/simulated/ideal/C01_FD1.png | 4 +- test/images/simulated/ideal/C01_FD2.png | 4 +- test/images/simulated/ideal/C01_FD3.png | 4 +- test/images/simulated/ideal/C01_FD4.png | 4 +- test/images/simulated/ideal/C01_FE1.png | 4 +- test/images/simulated/ideal/C01_FE2.png | 4 +- test/images/simulated/ideal/C01_FE3.png | 4 +- test/images/simulated/ideal/C02_FA0.png | 4 +- test/images/simulated/ideal/C02_FB1.png | 4 +- test/images/simulated/ideal/C02_FB2.png | 4 +- test/images/simulated/ideal/C02_FB3.png | 4 +- test/images/simulated/ideal/C02_FB4.png | 4 +- test/images/simulated/ideal/C02_FC1.png | 4 +- test/images/simulated/ideal/C02_FC2.png | 4 +- test/images/simulated/ideal/C02_FC3.png | 4 +- test/images/simulated/ideal/C02_FC4.png | 4 +- test/images/simulated/ideal/C02_FD1.png | 4 +- test/images/simulated/ideal/C02_FD2.png | 4 +- test/images/simulated/ideal/C02_FD3.png | 4 +- test/images/simulated/ideal/C02_FD4.png | 4 +- test/images/simulated/ideal/C02_FE1.png | 4 +- test/images/simulated/ideal/C02_FE2.png | 4 +- test/images/simulated/ideal/C02_FE3.png | 4 +- test/images/simulated/ideal/C03_FA0.png | 4 +- test/images/simulated/ideal/C03_FB1.png | 4 +- test/images/simulated/ideal/C03_FB2.png | 4 +- test/images/simulated/ideal/C03_FB3.png | 4 +- test/images/simulated/ideal/C03_FB4.png | 4 +- test/images/simulated/ideal/C03_FC1.png | 4 +- test/images/simulated/ideal/C03_FC2.png | 4 +- test/images/simulated/ideal/C03_FC3.png | 4 +- test/images/simulated/ideal/C03_FC4.png | 4 +- test/images/simulated/ideal/C03_FD1.png | 4 +- test/images/simulated/ideal/C03_FD2.png | 4 +- test/images/simulated/ideal/C03_FD3.png | 4 +- test/images/simulated/ideal/C03_FD4.png | 4 +- test/images/simulated/ideal/C03_FE1.png | 4 +- test/images/simulated/ideal/C03_FE2.png | 4 +- test/images/simulated/ideal/C03_FE3.png | 4 +- test/images/simulated/ideal/C04_FA0.png | 4 +- test/images/simulated/ideal/C04_FB1.png | 4 +- test/images/simulated/ideal/C04_FB2.png | 4 +- test/images/simulated/ideal/C04_FB3.png | 4 +- test/images/simulated/ideal/C04_FB4.png | 4 +- test/images/simulated/ideal/C04_FC1.png | 4 +- test/images/simulated/ideal/C04_FC2.png | 4 +- test/images/simulated/ideal/C04_FC3.png | 4 +- test/images/simulated/ideal/C04_FC4.png | 4 +- test/images/simulated/ideal/C04_FD1.png | 4 +- test/images/simulated/ideal/C04_FD2.png | 4 +- test/images/simulated/ideal/C04_FD3.png | 4 +- test/images/simulated/ideal/C04_FD4.png | 4 +- test/images/simulated/ideal/C04_FE1.png | 4 +- test/images/simulated/ideal/C04_FE2.png | 4 +- test/images/simulated/ideal/C04_FE3.png | 4 +- test/images/simulated/ideal/C05_FA0.png | 4 +- test/images/simulated/ideal/C05_FB1.png | 4 +- test/images/simulated/ideal/C05_FB2.png | 4 +- test/images/simulated/ideal/C05_FB3.png | 4 +- 
test/images/simulated/ideal/C05_FB4.png | 4 +- test/images/simulated/ideal/C05_FC1.png | 4 +- test/images/simulated/ideal/C05_FC2.png | 4 +- test/images/simulated/ideal/C05_FC3.png | 4 +- test/images/simulated/ideal/C05_FC4.png | 4 +- test/images/simulated/ideal/C05_FD1.png | 4 +- test/images/simulated/ideal/C05_FD2.png | 4 +- test/images/simulated/ideal/C05_FD3.png | 4 +- test/images/simulated/ideal/C05_FD4.png | 4 +- test/images/simulated/ideal/C05_FE1.png | 4 +- test/images/simulated/ideal/C05_FE2.png | 4 +- test/images/simulated/ideal/C05_FE3.png | 4 +- test/images/simulated/ideal/C06_FA0.png | 4 +- test/images/simulated/ideal/C06_FB1.png | 4 +- test/images/simulated/ideal/C06_FB2.png | 4 +- test/images/simulated/ideal/C06_FB3.png | 4 +- test/images/simulated/ideal/C06_FB4.png | 4 +- test/images/simulated/ideal/C06_FC1.png | 4 +- test/images/simulated/ideal/C06_FC2.png | 4 +- test/images/simulated/ideal/C06_FC3.png | 4 +- test/images/simulated/ideal/C06_FC4.png | 4 +- test/images/simulated/ideal/C06_FD1.png | 4 +- test/images/simulated/ideal/C06_FD2.png | 4 +- test/images/simulated/ideal/C06_FD3.png | 4 +- test/images/simulated/ideal/C06_FD4.png | 4 +- test/images/simulated/ideal/C06_FE1.png | 4 +- test/images/simulated/ideal/C06_FE2.png | 4 +- test/images/simulated/ideal/C06_FE3.png | 4 +- test/images/simulated/ideal/C07_FA0.png | 4 +- test/images/simulated/ideal/C07_FB1.png | 4 +- test/images/simulated/ideal/C07_FB2.png | 4 +- test/images/simulated/ideal/C07_FB3.png | 4 +- test/images/simulated/ideal/C07_FB4.png | 4 +- test/images/simulated/ideal/C07_FC1.png | 4 +- test/images/simulated/ideal/C07_FC2.png | 4 +- test/images/simulated/ideal/C07_FC3.png | 4 +- test/images/simulated/ideal/C07_FC4.png | 4 +- test/images/simulated/ideal/C07_FD1.png | 4 +- test/images/simulated/ideal/C07_FD2.png | 4 +- test/images/simulated/ideal/C07_FD3.png | 4 +- test/images/simulated/ideal/C07_FD4.png | 4 +- test/images/simulated/ideal/C07_FE1.png | 4 +- test/images/simulated/ideal/C07_FE2.png | 4 +- test/images/simulated/ideal/C07_FE3.png | 4 +- test/images/simulated/ideal/C08_FA0.png | 4 +- test/images/simulated/ideal/C08_FB1.png | 4 +- test/images/simulated/ideal/C08_FB2.png | 4 +- test/images/simulated/ideal/C08_FB3.png | 4 +- test/images/simulated/ideal/C08_FB4.png | 4 +- test/images/simulated/ideal/C08_FC1.png | 4 +- test/images/simulated/ideal/C08_FC2.png | 4 +- test/images/simulated/ideal/C08_FC3.png | 4 +- test/images/simulated/ideal/C08_FC4.png | 4 +- test/images/simulated/ideal/C08_FD1.png | 4 +- test/images/simulated/ideal/C08_FD2.png | 4 +- test/images/simulated/ideal/C08_FD3.png | 4 +- test/images/simulated/ideal/C08_FD4.png | 4 +- test/images/simulated/ideal/C08_FE1.png | 4 +- test/images/simulated/ideal/C08_FE2.png | 4 +- test/images/simulated/ideal/C08_FE3.png | 4 +- test/images/simulated/ideal/C09_FA0.png | 4 +- test/images/simulated/ideal/C09_FB1.png | 4 +- test/images/simulated/ideal/C09_FB2.png | 4 +- test/images/simulated/ideal/C09_FB3.png | 4 +- test/images/simulated/ideal/C09_FB4.png | 4 +- test/images/simulated/ideal/C09_FC1.png | 4 +- test/images/simulated/ideal/C09_FC2.png | 4 +- test/images/simulated/ideal/C09_FC3.png | 4 +- test/images/simulated/ideal/C09_FC4.png | 4 +- test/images/simulated/ideal/C09_FD1.png | 4 +- test/images/simulated/ideal/C09_FD2.png | 4 +- test/images/simulated/ideal/C09_FD3.png | 4 +- test/images/simulated/ideal/C09_FD4.png | 4 +- test/images/simulated/ideal/C09_FE1.png | 4 +- test/images/simulated/ideal/C09_FE2.png | 4 +- 
test/images/simulated/ideal/C09_FE3.png | 4 +- test/images/simulated/ideal/C10_FA0.png | 4 +- test/images/simulated/ideal/C10_FB1.png | 4 +- test/images/simulated/ideal/C10_FB2.png | 4 +- test/images/simulated/ideal/C10_FB3.png | 4 +- test/images/simulated/ideal/C10_FB4.png | 4 +- test/images/simulated/ideal/C10_FC1.png | 4 +- test/images/simulated/ideal/C10_FC2.png | 4 +- test/images/simulated/ideal/C10_FC3.png | 4 +- test/images/simulated/ideal/C10_FC4.png | 4 +- test/images/simulated/ideal/C10_FD1.png | 4 +- test/images/simulated/ideal/C10_FD2.png | 4 +- test/images/simulated/ideal/C10_FD3.png | 4 +- test/images/simulated/ideal/C10_FD4.png | 4 +- test/images/simulated/ideal/C10_FE1.png | 4 +- test/images/simulated/ideal/C10_FE2.png | 4 +- test/images/simulated/ideal/C10_FE3.png | 4 +- test/test_extrinsic_calibration.py | 42 +++++++++---------- 163 files changed, 370 insertions(+), 354 deletions(-) diff --git a/src/common/math.py b/src/common/math.py index 5da8b63..8a9cddd 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -151,9 +151,9 @@ def __mul__(self, other) -> 'Matrix4x4': result_numpy_array = numpy.matmul(self.as_numpy_array(), other.as_numpy_array()) return Matrix4x4(values=list(result_numpy_array.flatten())) - def get_rotation_as_quaternion(self) -> list[float]: + def get_rotation_as_quaternion(self, canonical: bool = False) -> list[float]: # noinspection PyArgumentList - return Rotation.from_matrix(self.as_numpy_array()[0:3, 0:3]).as_quat().tolist() + return Rotation.from_matrix(self.as_numpy_array()[0:3, 0:3]).as_quat(canonical=canonical).tolist() def get_translation(self) -> list[float]: """ diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index d500a8e..146e3ba 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -24,7 +24,9 @@ _MAX_FLOAT: Final[float] = sys.float_info.max _TERMINATION_ITERATION_COUNT: Final[int] = 500 _TERMINATION_ROTATION_CHANGE_DEGREES: Final[float] = 0.05 -_TERMINATION_TRANSLATION_CHANGE: Final[float] = 0.05 +_TERMINATION_TRANSLATION_CHANGE: Final[float] = 0.5 + +_DEBUG_ANNOTATIONS: Final[bool] = False class _ImageData(BaseModel): @@ -135,6 +137,11 @@ def _annotate_image( aruco_detector_parameters=aruco_detector_parameters, aruco_dictionary=aruco_dictionary, image_greyscale=image_greyscale) + if _DEBUG_ANNOTATIONS: + for annotation in annotations: + cv2.drawMarker(img=image_rgb, position=(int(annotation.x_px), int(annotation.y_px)), color=(0, 255, 0)) + cv2.imshow("Test", image_rgb) + cv2.waitKey(0) return annotations def _calculate_implementation( @@ -190,10 +197,11 @@ def _calculate_implementation( timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, detector_label=metadata.detector_label) intrinsic_parameters: IntrinsicParameters = detector_intrinsics_by_label[metadata.detector_label] - initial_to_reference: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + reference_to_initial: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=image_data.annotations, landmarks=charuco_target.landmarks, detector_intrinsics=intrinsic_parameters) + initial_to_reference: Matrix4x4 = reference_to_initial.inverse() detector: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) detector.initial_to_reference = initial_to_reference detector.refined_to_reference = initial_to_reference @@ -244,24 +252,36 @@ def _calculate_implementation( for detector_data in data.detectors: 
landmarks: list[Landmark] = list() annotations: list[Annotation] = list() - for timestamp_data in data.timestamps: + for timestamp_index, timestamp_data in enumerate(data.timestamps): for feature_data in timestamp_data.features: + timestamped_feature_label: str = \ + f"{feature_data.feature_label}{Annotation.RELATION_CHARACTER}{timestamp_index}" for image_data in timestamp_data.images: + if image_data.detector_label != detector_data.detector_label: + continue for annotation in image_data.annotations: if annotation.feature_label == feature_data.feature_label and \ feature_data.position is not None: - landmarks.append(feature_data.position) - annotations.append(annotation) - refined_to_reference: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + landmarks.append(Landmark( + feature_label=timestamped_feature_label, + x=feature_data.position.x, + y=feature_data.position.y, + z=feature_data.position.z)) + annotations.append(Annotation( + feature_label=timestamped_feature_label, + x_px=annotation.x_px, + y_px=annotation.y_px)) + reference_to_refined: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=annotations, landmarks=landmarks, detector_intrinsics=detector_data.intrinsic_parameters) + refined_to_reference: Matrix4x4 = reference_to_refined.inverse() translation_change: float = numpy.linalg.norm( numpy.asarray(refined_to_reference.get_translation()) - numpy.asarray(detector_data.refined_to_reference.get_translation())) - old_to_reference: numpy.ndarray = detector_data.refined_to_reference.as_numpy_array() - reference_to_refined: numpy.ndarray = refined_to_reference.inverse().as_numpy_array() - old_to_refined: numpy.ndarray = numpy.matmul(reference_to_refined, old_to_reference) + old_to_refined: numpy.ndarray = numpy.matmul( + reference_to_refined.as_numpy_array(), + detector_data.refined_to_reference.as_numpy_array()) # noinspection PyArgumentList rotation_change_degrees: float = \ numpy.linalg.norm(Rotation.from_matrix(old_to_refined[0:3, 0:3]).as_rotvec(degrees=True)) diff --git a/test/images/simulated/ideal/C01_FA0.png b/test/images/simulated/ideal/C01_FA0.png index 8dd9976..e0015e0 100644 --- a/test/images/simulated/ideal/C01_FA0.png +++ b/test/images/simulated/ideal/C01_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:685c5a9a121ac1b83d1a93c1fe2da23a1b5984791458c371035b4877099998a1 -size 1422324 +oid sha256:6bf9f47804b1a43a39bcf184c2a9ce46b7fe71bab34a8d458ac93132c01d256e +size 1232577 diff --git a/test/images/simulated/ideal/C01_FB1.png b/test/images/simulated/ideal/C01_FB1.png index e24d09c..50edd49 100644 --- a/test/images/simulated/ideal/C01_FB1.png +++ b/test/images/simulated/ideal/C01_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ad8184557b4d46a0b43add7565c7bc272a5a2cbfc7e808caabcb4ff06973e1f -size 1447510 +oid sha256:558de379919c9c7ff33b417f201e667fa73217be10903b5c616abce0514aa878 +size 1284285 diff --git a/test/images/simulated/ideal/C01_FB2.png b/test/images/simulated/ideal/C01_FB2.png index 902234b..a23c5a2 100644 --- a/test/images/simulated/ideal/C01_FB2.png +++ b/test/images/simulated/ideal/C01_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85b79ba5366fd9fb4f273fe7662b7b087ae3d9eadfbcdce2c9880b8d6c6f1910 -size 1470896 +oid sha256:7224f0f49e8cd507d04b420e0b0599960c847eff6dea2cafd041d0e4a816396c +size 1277557 diff --git a/test/images/simulated/ideal/C01_FB3.png b/test/images/simulated/ideal/C01_FB3.png index c401861..3acdb73 100644 --- 
a/test/images/simulated/ideal/C01_FB3.png +++ b/test/images/simulated/ideal/C01_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef8f5d2582689662ffbf4e1b4afeeb9e8c2ce214f68ae909b8c2c3bcb0a410ed -size 1446378 +oid sha256:93a6655fbd38573abdfe49fbb17b7d3ff5228f7fb38c23405a0c0564f140f6dc +size 1282160 diff --git a/test/images/simulated/ideal/C01_FB4.png b/test/images/simulated/ideal/C01_FB4.png index 4f1cb35..06094f8 100644 --- a/test/images/simulated/ideal/C01_FB4.png +++ b/test/images/simulated/ideal/C01_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e59e45007a7da75c3ee75208ccc7a275beeac1f82f0faefb241e57e4bcd1f16 -size 1463333 +oid sha256:c9dfc36745eeb7d89ed49a2a2bfe1b27729620eebde33260bb2027c616493d20 +size 1279933 diff --git a/test/images/simulated/ideal/C01_FC1.png b/test/images/simulated/ideal/C01_FC1.png index 502e13e..137be2d 100644 --- a/test/images/simulated/ideal/C01_FC1.png +++ b/test/images/simulated/ideal/C01_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96e55f09733e970f45a6f4f9bc5ba27ae6feeb07915b4d8698edcd791949d0e0 -size 1444312 +oid sha256:09e9ef2bd6364c71bfd298f97a1b2ef258c6d5128f3a7e387461c94ba5b08ddb +size 1257919 diff --git a/test/images/simulated/ideal/C01_FC2.png b/test/images/simulated/ideal/C01_FC2.png index ce5d2eb..5c354b6 100644 --- a/test/images/simulated/ideal/C01_FC2.png +++ b/test/images/simulated/ideal/C01_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35b876f631136b7df6a49b2c40c569950041dc81cc5af3c82901c098f34b68d1 -size 1424405 +oid sha256:6767c58ab6f60207a16c83c3fdba25062444bdad689068683878ce6b409b97e3 +size 1229439 diff --git a/test/images/simulated/ideal/C01_FC3.png b/test/images/simulated/ideal/C01_FC3.png index d3c3de5..e9fd0f0 100644 --- a/test/images/simulated/ideal/C01_FC3.png +++ b/test/images/simulated/ideal/C01_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1e6f8dda4978cc95de3e5e0b9615515c5f672faf8b44ee6756f2065b4e01273 -size 1442862 +oid sha256:3df485628ef87d73b6c0e4d26a2b12a9e7a5f70bcdb47decc4fea4c0571c84d0 +size 1257269 diff --git a/test/images/simulated/ideal/C01_FC4.png b/test/images/simulated/ideal/C01_FC4.png index 95b09fa..7c9ecd2 100644 --- a/test/images/simulated/ideal/C01_FC4.png +++ b/test/images/simulated/ideal/C01_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c71a738b4ece5f0999f4b334b90af73133f80d08c1de2bb43bf09ec536e5056 -size 1424085 +oid sha256:dc7ceaca31583454d21df589a6493f13abd5f22dd74c037ddf79eacdbcb5d9db +size 1234268 diff --git a/test/images/simulated/ideal/C01_FD1.png b/test/images/simulated/ideal/C01_FD1.png index 007961e..0e90ec8 100644 --- a/test/images/simulated/ideal/C01_FD1.png +++ b/test/images/simulated/ideal/C01_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e029b8515cbf46e59ad23cd1b1d410d2038cb58fb065c4093fa22127bfdb098d -size 1463965 +oid sha256:08e7e25d39f17bbf0900e559fd06564d2b45555b670011198f14380c05406a31 +size 1300928 diff --git a/test/images/simulated/ideal/C01_FD2.png b/test/images/simulated/ideal/C01_FD2.png index 1c32646..b8d03d1 100644 --- a/test/images/simulated/ideal/C01_FD2.png +++ b/test/images/simulated/ideal/C01_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:598b4b94dd0fd6eb1b8f68517b2d0ef0ec1bdb661e305f06d92c4f9433e32ed9 -size 1381951 +oid sha256:e026840a4d346a8619dc1e6ccc44dccbad916a8782e81e1db113edfb297a3ad5 +size 1204230 diff --git 
a/test/images/simulated/ideal/C01_FD3.png b/test/images/simulated/ideal/C01_FD3.png index d3f89e4..2e2adde 100644 --- a/test/images/simulated/ideal/C01_FD3.png +++ b/test/images/simulated/ideal/C01_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03953d988f02d65d562c7fde2350825b1c9401e53a19b291e9385002458843e4 -size 1464705 +oid sha256:3a5bb94ca24c881e7c6d079487d2cd1eb358dd91acc5ab25a4936c896b745182 +size 1304665 diff --git a/test/images/simulated/ideal/C01_FD4.png b/test/images/simulated/ideal/C01_FD4.png index fb952a0..0d8eea7 100644 --- a/test/images/simulated/ideal/C01_FD4.png +++ b/test/images/simulated/ideal/C01_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:155bced4c7fd47c303924629345ff649cd50e58eaffc807c6b81e9729be0ad52 -size 1380687 +oid sha256:85404d91dfa7290ecfba48e61d445ebab6fd16f9f861fe41fedacf48a84c3a07 +size 1205644 diff --git a/test/images/simulated/ideal/C01_FE1.png b/test/images/simulated/ideal/C01_FE1.png index cb66017..692a6b0 100644 --- a/test/images/simulated/ideal/C01_FE1.png +++ b/test/images/simulated/ideal/C01_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66d939a5b78ed2c25ce4026494e7c7218a9708dee7723ff39570081a24464895 -size 1372376 +oid sha256:68f686718bfbc4468efdf641ee72ea96112604c7d52fb85c4411d64c99f3f3c3 +size 1273829 diff --git a/test/images/simulated/ideal/C01_FE2.png b/test/images/simulated/ideal/C01_FE2.png index e8d645f..91d7b51 100644 --- a/test/images/simulated/ideal/C01_FE2.png +++ b/test/images/simulated/ideal/C01_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7751a9bc31b8dc4bb9fb6d7cf1cd9301c1da717623501b5c5dbe493625441a8 -size 1399105 +oid sha256:1c982ec501acb240012df7641cfb2c6cff56d8b83ca07ec92d350393ed31a384 +size 1249297 diff --git a/test/images/simulated/ideal/C01_FE3.png b/test/images/simulated/ideal/C01_FE3.png index 177b381..96d86ed 100644 --- a/test/images/simulated/ideal/C01_FE3.png +++ b/test/images/simulated/ideal/C01_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78ace4130b9457bbcce73ad5af187941eb0dc641b1d572f6c91b752a550d3349 -size 1383902 +oid sha256:4b554f4ed5b1e86e967c907a45535b784cbe892eb2f939ec11cf8cc8afcfd455 +size 1203610 diff --git a/test/images/simulated/ideal/C02_FA0.png b/test/images/simulated/ideal/C02_FA0.png index e47c3bd..e79ace3 100644 --- a/test/images/simulated/ideal/C02_FA0.png +++ b/test/images/simulated/ideal/C02_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:324e138e4791dee48f87874d710d6f7e15f8c08acea211088aad2bd2a6c523f8 -size 1433821 +oid sha256:d7e3cd025bfac00bf2ab9d5e400c457481833d5629fc78576716a02f0f633ac5 +size 1288497 diff --git a/test/images/simulated/ideal/C02_FB1.png b/test/images/simulated/ideal/C02_FB1.png index 83b9b72..7735c5f 100644 --- a/test/images/simulated/ideal/C02_FB1.png +++ b/test/images/simulated/ideal/C02_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a331d0c31c8aea1f2504cc0b1ee67ec6f41ae5356536786d738afec99b18758d -size 1428455 +oid sha256:75abf5a3c79e6a64d02034ac47ae376338f1be0b872e5239eae74f8dac202669 +size 1279945 diff --git a/test/images/simulated/ideal/C02_FB2.png b/test/images/simulated/ideal/C02_FB2.png index 14678cc..c87047a 100644 --- a/test/images/simulated/ideal/C02_FB2.png +++ b/test/images/simulated/ideal/C02_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:244b9fc75fa1778641350cabf4f81f14daa88a68338abbf9b3d0b9bc5f1b6b25 -size 
1434883 +oid sha256:a27d9f28322eca9183f0160dd0fa7ad062b2742a0d6b0f6f40bd24d0c13af95f +size 1292887 diff --git a/test/images/simulated/ideal/C02_FB3.png b/test/images/simulated/ideal/C02_FB3.png index eaddd37..3b92667 100644 --- a/test/images/simulated/ideal/C02_FB3.png +++ b/test/images/simulated/ideal/C02_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f905031638871b79ea8f253acc0d362c60be61c773ad1027b0c810af1a27d2a3 -size 1414630 +oid sha256:d1fdf94f6794c071fcd8fa0a6f58663333f374167b7d9c2af99a001361040fbd +size 1263241 diff --git a/test/images/simulated/ideal/C02_FB4.png b/test/images/simulated/ideal/C02_FB4.png index c8bc493..d122a6f 100644 --- a/test/images/simulated/ideal/C02_FB4.png +++ b/test/images/simulated/ideal/C02_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdac47abf6a57c1545f6dd1af6d3876384d02fe8d1a0e97856f3c022b5d1ad5b -size 1443043 +oid sha256:08cc29ca7aa5aaa47f284df4b1e61ce60860f74080b58d66874b299b4ebbbc90 +size 1278370 diff --git a/test/images/simulated/ideal/C02_FC1.png b/test/images/simulated/ideal/C02_FC1.png index 4e925ce..94777fd 100644 --- a/test/images/simulated/ideal/C02_FC1.png +++ b/test/images/simulated/ideal/C02_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9373dfac6f53b2955c46c0003217b2832fcdb253379717d835fa8e9fce3c021 -size 1426021 +oid sha256:c39a7a1051bae31f22f52b88b09f72db3fa67e35d77d4c469225f4063da70b06 +size 1240421 diff --git a/test/images/simulated/ideal/C02_FC2.png b/test/images/simulated/ideal/C02_FC2.png index aef5f3a..b2823b2 100644 --- a/test/images/simulated/ideal/C02_FC2.png +++ b/test/images/simulated/ideal/C02_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:363ce035de49af2d894815d0c6557c992d4e7ff1cf785c9fcb2f091b910064f5 -size 1412680 +oid sha256:5074264a809c6ec13e240a45f1d04f402b5a32a3462c8d5e92a34d07f181ba09 +size 1226174 diff --git a/test/images/simulated/ideal/C02_FC3.png b/test/images/simulated/ideal/C02_FC3.png index 369f252..9576c30 100644 --- a/test/images/simulated/ideal/C02_FC3.png +++ b/test/images/simulated/ideal/C02_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22b5b8c060a4cadc58657215cf01f39c1e3fa5f469f0730aa229eb2ac064f2e6 -size 1412412 +oid sha256:e47bdf7f4892bbda429d66395e5c445f15d955268f2697f5844a3bc73b4e6821 +size 1219472 diff --git a/test/images/simulated/ideal/C02_FC4.png b/test/images/simulated/ideal/C02_FC4.png index e8d5779..7a393f8 100644 --- a/test/images/simulated/ideal/C02_FC4.png +++ b/test/images/simulated/ideal/C02_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40e4f25fca2bb0c681e75d1241f3388cf19d1f48962be69caa928c10e8c522fc -size 1399049 +oid sha256:f2edfb3fb0a60792694ee171abd7ebb52aea79d99e8ceaae4cf714f52928441b +size 1218974 diff --git a/test/images/simulated/ideal/C02_FD1.png b/test/images/simulated/ideal/C02_FD1.png index f2023cc..735dbf1 100644 --- a/test/images/simulated/ideal/C02_FD1.png +++ b/test/images/simulated/ideal/C02_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a18f86293ff877a55b6a00abd111e63f89fd6bd63bc21c7a7932ba7227d15422 -size 1330835 +oid sha256:c5af5bd3ee12bcdfdb7cbd5501e71d5fcb83364bac7014716580db341cc7f469 +size 1190362 diff --git a/test/images/simulated/ideal/C02_FD2.png b/test/images/simulated/ideal/C02_FD2.png index 3ae2a0b..07bfb33 100644 --- a/test/images/simulated/ideal/C02_FD2.png +++ b/test/images/simulated/ideal/C02_FD2.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:a51df2176033567d59d95119f3bad99c43e8ea85121714ebb879fa3fd91c4a90 -size 1440705 +oid sha256:4f8ca0bb256270a6ad186fe55b04906a4ca21669bcbab83cf12480a2a2036bc0 +size 1274873 diff --git a/test/images/simulated/ideal/C02_FD3.png b/test/images/simulated/ideal/C02_FD3.png index 94a4987..7b73797 100644 --- a/test/images/simulated/ideal/C02_FD3.png +++ b/test/images/simulated/ideal/C02_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4105950ed15d27ec43c6873a9142f81a475a498ad59385416d5ad19314098afb -size 1457314 +oid sha256:26d7164c7467a1727ae9438feb17bbfaeb972e4dd18f36d0207993658cd1b9fa +size 1291826 diff --git a/test/images/simulated/ideal/C02_FD4.png b/test/images/simulated/ideal/C02_FD4.png index a9643b1..f35ae5f 100644 --- a/test/images/simulated/ideal/C02_FD4.png +++ b/test/images/simulated/ideal/C02_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50be95b8b1585bc35b1faf49740e962af841f97feb7340e23b8f892cc1c894af -size 1329991 +oid sha256:9bc88eaacd404c4ce155445963eb860a9f6bc1e005096d92a6850f1ee4d2d594 +size 1185253 diff --git a/test/images/simulated/ideal/C02_FE1.png b/test/images/simulated/ideal/C02_FE1.png index 668c22c..ea4778c 100644 --- a/test/images/simulated/ideal/C02_FE1.png +++ b/test/images/simulated/ideal/C02_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95e37f76c242537b07eee96ac32fab2a9ee1d1c4ccba97492e986ad2078a40bf -size 1416192 +oid sha256:e5af866e4afed0c3d3efd691a27c02be41eaf4aae68165ef09cb380699efac75 +size 1310222 diff --git a/test/images/simulated/ideal/C02_FE2.png b/test/images/simulated/ideal/C02_FE2.png index 76e66ea..582baf6 100644 --- a/test/images/simulated/ideal/C02_FE2.png +++ b/test/images/simulated/ideal/C02_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54d6f219c4ed29eee36a282ddd3bff2a27e9f7e887fd48ce54ba335044a5c613 -size 1439930 +oid sha256:22585f081780eda6d6a796806c790f3d7e0289e41826d048908e5353b1c81370 +size 1306641 diff --git a/test/images/simulated/ideal/C02_FE3.png b/test/images/simulated/ideal/C02_FE3.png index 3121d55..684bb84 100644 --- a/test/images/simulated/ideal/C02_FE3.png +++ b/test/images/simulated/ideal/C02_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:746e74948b4294c43924f2e9ef9c4a16c187f1152dfc49470cd9cf165a808370 -size 1360805 +oid sha256:f0ca123c94385f477c4cc776f5512a9f897411ffda6016b2039809c13d2e8f72 +size 1206274 diff --git a/test/images/simulated/ideal/C03_FA0.png b/test/images/simulated/ideal/C03_FA0.png index efcc729..2c7e078 100644 --- a/test/images/simulated/ideal/C03_FA0.png +++ b/test/images/simulated/ideal/C03_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdc44679f5ae9b8c78042c5865c6689bf24671a71c332cd2848859f49f3a44a0 -size 1372959 +oid sha256:d89b40adab42eebf4d5a110c50a7a4b6be2fca602a51dc9e6d412185a0f4d83d +size 1233551 diff --git a/test/images/simulated/ideal/C03_FB1.png b/test/images/simulated/ideal/C03_FB1.png index 29c9ae0..4033c2b 100644 --- a/test/images/simulated/ideal/C03_FB1.png +++ b/test/images/simulated/ideal/C03_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df665424ab1df581d0304e66d8a82355069552eb77bc628bac77e18dc036abd1 -size 1406590 +oid sha256:c297262251e7e0e92dcb1bb43dc53d68b086eeffb89da07235a98b57380323b9 +size 1298337 diff --git a/test/images/simulated/ideal/C03_FB2.png b/test/images/simulated/ideal/C03_FB2.png index 41313d4..92ad8d2 100644 --- 
a/test/images/simulated/ideal/C03_FB2.png +++ b/test/images/simulated/ideal/C03_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d1b196444d4d742910210d66d9f4a7bf2cd3170f363d4ef2bb4d223dbc62676 -size 1406352 +oid sha256:80b64da1dd31e8d4e806468e3e58b5cbfc669cc92d5a8f9cd5576cc0d99f06dd +size 1294380 diff --git a/test/images/simulated/ideal/C03_FB3.png b/test/images/simulated/ideal/C03_FB3.png index d0fc5e7..66dae32 100644 --- a/test/images/simulated/ideal/C03_FB3.png +++ b/test/images/simulated/ideal/C03_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73b9a7c365034b99ee54292c3555091db0aaccb91a54b9a8711d13828b39739c -size 1397026 +oid sha256:5eb977fff5b69488220f35be0d48e09ff510fd34506807100e854645392f773b +size 1265355 diff --git a/test/images/simulated/ideal/C03_FB4.png b/test/images/simulated/ideal/C03_FB4.png index 3ec181f..127499c 100644 --- a/test/images/simulated/ideal/C03_FB4.png +++ b/test/images/simulated/ideal/C03_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b340c2ed01d779889bc9ab5b8a977cc92534952cb3ab5a76207854017825c5d -size 1394740 +oid sha256:cd3208aea61c558ee9bebe0e452d82111d2d511a4555d87202eac14babae4cb1 +size 1267374 diff --git a/test/images/simulated/ideal/C03_FC1.png b/test/images/simulated/ideal/C03_FC1.png index 1ef97e3..2c9e8e8 100644 --- a/test/images/simulated/ideal/C03_FC1.png +++ b/test/images/simulated/ideal/C03_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1a76d894119d48f8bdef1ffbc92775ca4e7ec52923da2109737d3dff201e492 -size 1402553 +oid sha256:9f56d055980cf7268e9f6357be5d25713e6d07a96d5e5e59c92ad85993b8b87b +size 1262504 diff --git a/test/images/simulated/ideal/C03_FC2.png b/test/images/simulated/ideal/C03_FC2.png index 3cc1bb8..fb83d1d 100644 --- a/test/images/simulated/ideal/C03_FC2.png +++ b/test/images/simulated/ideal/C03_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5958e875ed1f28fffc5847023178e1ddd1193ff3ef1540231a238fac9af9742 -size 1404246 +oid sha256:27f21ff35b63cdf85dac5687ec9d3bf32817b5f68b1bf03548b0985ce54378fc +size 1261828 diff --git a/test/images/simulated/ideal/C03_FC3.png b/test/images/simulated/ideal/C03_FC3.png index 8bb895c..89c7120 100644 --- a/test/images/simulated/ideal/C03_FC3.png +++ b/test/images/simulated/ideal/C03_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b64857b328d257f1505710466fe4df463948e5469268233ad16cdbedef60b33d -size 1375851 +oid sha256:2caece8489a8185202296915dcaeaa43b954c559dfc0aa3080d81a2e606de8ff +size 1236088 diff --git a/test/images/simulated/ideal/C03_FC4.png b/test/images/simulated/ideal/C03_FC4.png index fca61fc..22500e1 100644 --- a/test/images/simulated/ideal/C03_FC4.png +++ b/test/images/simulated/ideal/C03_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00d9a2dc917045072e207384e4751de0b77b9c191a3d92eef37e6f45d683bbca -size 1375940 +oid sha256:d73c5dffba357e14a0328da288c32b508710cc7a8962d94b1124a6d0f473ddc6 +size 1236033 diff --git a/test/images/simulated/ideal/C03_FD1.png b/test/images/simulated/ideal/C03_FD1.png index 7a5bd15..7f54b66 100644 --- a/test/images/simulated/ideal/C03_FD1.png +++ b/test/images/simulated/ideal/C03_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e43790d876390a11b2ce2c09eb653a7ac23c9fe8c47358bb766e0eaa22ef606e -size 1424033 +oid sha256:ac4149848f89718be401759aa42f446b05e1ba23b44f0fde0026cb3b6a2be42b +size 1305665 diff --git 
a/test/images/simulated/ideal/C03_FD2.png b/test/images/simulated/ideal/C03_FD2.png index c629ca5..dc1e76b 100644 --- a/test/images/simulated/ideal/C03_FD2.png +++ b/test/images/simulated/ideal/C03_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7b52ea177e008f5d3900629fb5695864e1fcd2daa902c56ab9f512e851e3726 -size 1424141 +oid sha256:31a47e294c109673a99b208f2b13180d7ce554ede3ad667dcd27452fb0e39b82 +size 1302756 diff --git a/test/images/simulated/ideal/C03_FD3.png b/test/images/simulated/ideal/C03_FD3.png index 1b4d576..347bfa2 100644 --- a/test/images/simulated/ideal/C03_FD3.png +++ b/test/images/simulated/ideal/C03_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11f6df1a9c47d0d0619bb78b4ed1b16f4154abba48c383ca005faf4b83f22385 -size 1337620 +oid sha256:10f7d0573e8abce21d2adbe95881ed0762187382867e4568dd385828b8c6aee8 +size 1206465 diff --git a/test/images/simulated/ideal/C03_FD4.png b/test/images/simulated/ideal/C03_FD4.png index bc36717..a4c41dd 100644 --- a/test/images/simulated/ideal/C03_FD4.png +++ b/test/images/simulated/ideal/C03_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70a1c6df1305bca4da1ddba629bb4097524c0f7a236d201597798f72d847098c -size 1336490 +oid sha256:f47ddf5f0f74e8a28a21194bc16c9869ac8d54abab47d09b3b5fb3e96021fd81 +size 1204077 diff --git a/test/images/simulated/ideal/C03_FE1.png b/test/images/simulated/ideal/C03_FE1.png index 21daf7f..3043adc 100644 --- a/test/images/simulated/ideal/C03_FE1.png +++ b/test/images/simulated/ideal/C03_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8703bded231856398f1c7be44c01aa310ff9cafe170060065694719fc7024f2f -size 1429249 +oid sha256:51517b3a58198e53e3f266e41a3da5c61d6f66726dcbaf419b4af0d04a970f3b +size 1296973 diff --git a/test/images/simulated/ideal/C03_FE2.png b/test/images/simulated/ideal/C03_FE2.png index 2c286ad..bc30587 100644 --- a/test/images/simulated/ideal/C03_FE2.png +++ b/test/images/simulated/ideal/C03_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7f0304656f31db8bdadf674e0c1cae8396feade820a95a16168cd0051309f34 -size 1221456 +oid sha256:b304bd98f7b64ba63797d2c2f40142f8f61d10c8ef772381e970cdbdb40e067c +size 1170676 diff --git a/test/images/simulated/ideal/C03_FE3.png b/test/images/simulated/ideal/C03_FE3.png index e849e13..4d79a60 100644 --- a/test/images/simulated/ideal/C03_FE3.png +++ b/test/images/simulated/ideal/C03_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e421f8ac446563fb2fc04538748d5d4b193dcdc9b19029ed6de6fb5d178c866f -size 1422364 +oid sha256:fce3ca475607e56e828766122fd57d922c3f16aa6a15cb2bb83009103ec6ba57 +size 1285763 diff --git a/test/images/simulated/ideal/C04_FA0.png b/test/images/simulated/ideal/C04_FA0.png index 9a792ea..c2d6f3c 100644 --- a/test/images/simulated/ideal/C04_FA0.png +++ b/test/images/simulated/ideal/C04_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9545fccc61f2909fd914459569d91616635cd6ddda0336bc4724f63672913279 -size 1434298 +oid sha256:31a64eb3cfd1ac8a6cc5bbd143f57ad62aaae98073aec268e0592d7293dcc52d +size 1288973 diff --git a/test/images/simulated/ideal/C04_FB1.png b/test/images/simulated/ideal/C04_FB1.png index f33a90a..f0a63f9 100644 --- a/test/images/simulated/ideal/C04_FB1.png +++ b/test/images/simulated/ideal/C04_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17bc9b8464eb95d3d7941f40c2f3d01d7a88a9cfc8ffc53a6304ee54c8cc542d -size 
1440510 +oid sha256:b37872871dd20d0561f62f015d325a2f5253d35f30f7f0b2a99c28aa362fb9d6 +size 1306500 diff --git a/test/images/simulated/ideal/C04_FB2.png b/test/images/simulated/ideal/C04_FB2.png index c044b35..10f0a38 100644 --- a/test/images/simulated/ideal/C04_FB2.png +++ b/test/images/simulated/ideal/C04_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4463bd6d13cf500ffb8e382c1cc7540726afe542b7106932ec78fb421ce13999 -size 1422684 +oid sha256:61380b9bf1adf7da9a1d54dc94582cbd189ff02be66c3e9753f67288272b004a +size 1274520 diff --git a/test/images/simulated/ideal/C04_FB3.png b/test/images/simulated/ideal/C04_FB3.png index 369e064..7320da7 100644 --- a/test/images/simulated/ideal/C04_FB3.png +++ b/test/images/simulated/ideal/C04_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f297d045f48d0719a65ed092f5615a159517a07dde648d65e988f113fdf72255 -size 1451842 +oid sha256:14380c23953252866e7a3ce44410148a679d4e87c6b49262ccb2a91ed45838b3 +size 1283879 diff --git a/test/images/simulated/ideal/C04_FB4.png b/test/images/simulated/ideal/C04_FB4.png index 436f509..e1bbcb9 100644 --- a/test/images/simulated/ideal/C04_FB4.png +++ b/test/images/simulated/ideal/C04_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19c1086f32daf7f7f3fed31d67642d9286b7f00210a6d9039fadff70cb13f7e9 -size 1417477 +oid sha256:9626a0a347b3bdebfeadd35833e2e6d5d8b42c862b773e1fa7df79d994cbdc36 +size 1279182 diff --git a/test/images/simulated/ideal/C04_FC1.png b/test/images/simulated/ideal/C04_FC1.png index 0fe23b8..46a764c 100644 --- a/test/images/simulated/ideal/C04_FC1.png +++ b/test/images/simulated/ideal/C04_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a930fa75cbc1d6173ff1e72ba7b5e03c83e543d71aaaa4da55445a29362fe8f -size 1410666 +oid sha256:20bd37d9df549c697ddefe8c70a185ef28918a9b48596b46be218cc38d696693 +size 1226535 diff --git a/test/images/simulated/ideal/C04_FC2.png b/test/images/simulated/ideal/C04_FC2.png index 5e2a69c..8eaf531 100644 --- a/test/images/simulated/ideal/C04_FC2.png +++ b/test/images/simulated/ideal/C04_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09e8980178bc4cd969b2ff9fa68d49359fffb21010119b6ad249eac3054bf96c -size 1421464 +oid sha256:340439956f379de6ef50ce7e9295c12c4a4c6aea636a84a54b1d097cac295b8b +size 1238751 diff --git a/test/images/simulated/ideal/C04_FC3.png b/test/images/simulated/ideal/C04_FC3.png index 01a89ac..c565c1b 100644 --- a/test/images/simulated/ideal/C04_FC3.png +++ b/test/images/simulated/ideal/C04_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:093b848b9139de7f3fc10dfe71bbd87f04a48e622200e1eada6b5346f02e9a97 -size 1398790 +oid sha256:2b6fec4fd7160f0cabd33254396030fdc9da094ec23e66f66ad8e84cc9acc212 +size 1219473 diff --git a/test/images/simulated/ideal/C04_FC4.png b/test/images/simulated/ideal/C04_FC4.png index 2825ec3..4c6a211 100644 --- a/test/images/simulated/ideal/C04_FC4.png +++ b/test/images/simulated/ideal/C04_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4427b70592637bf9ea7f65cea1f99694de23537ff56663cb59c5b8ccc687890f -size 1410892 +oid sha256:6663e15497f69af6cb928f250bdd93f97a1a7eeace7fc73c935ae53731129cd6 +size 1219918 diff --git a/test/images/simulated/ideal/C04_FD1.png b/test/images/simulated/ideal/C04_FD1.png index b84af6e..123c7ca 100644 --- a/test/images/simulated/ideal/C04_FD1.png +++ b/test/images/simulated/ideal/C04_FD1.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:4fe7281ec4e3dfdd181b1de92a451ec899789a213f469f500169f8dedca8906d -size 1456787 +oid sha256:94a593dfa33400f1c2533ffb6a17f6f53ebc089f28a2f837045349e2241df05f +size 1291913 diff --git a/test/images/simulated/ideal/C04_FD2.png b/test/images/simulated/ideal/C04_FD2.png index 25061cc..29e5df6 100644 --- a/test/images/simulated/ideal/C04_FD2.png +++ b/test/images/simulated/ideal/C04_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4ae19c7356827a265a67cf9476c32d9df202e291dda272678ab3ee71a9e1068 -size 1330143 +oid sha256:7f94a8c8eebaf17ff514b5f217404cecfeb362c580b388649c361428c7bc4346 +size 1189579 diff --git a/test/images/simulated/ideal/C04_FD3.png b/test/images/simulated/ideal/C04_FD3.png index 9184eaa..fd1368f 100644 --- a/test/images/simulated/ideal/C04_FD3.png +++ b/test/images/simulated/ideal/C04_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d32c4abb5b80e2d7e2d9855290cecec46c13e656adf6af70d8e72c586486639 -size 1330479 +oid sha256:db1f75897901d63dfa18b24f016fe4de953865684b57a1893568d5c547d0ab3b +size 1184625 diff --git a/test/images/simulated/ideal/C04_FD4.png b/test/images/simulated/ideal/C04_FD4.png index 3bebc2e..635bb03 100644 --- a/test/images/simulated/ideal/C04_FD4.png +++ b/test/images/simulated/ideal/C04_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f54ebb2e4d9030878f60d0860ad3d88110b7cb2e65dd72ee0bf85ae830d8cc95 -size 1441511 +oid sha256:8bc0fe4fbed76c7183a311d02aae304bf8b1188f114b9e0d6980bf5f60dfee39 +size 1274947 diff --git a/test/images/simulated/ideal/C04_FE1.png b/test/images/simulated/ideal/C04_FE1.png index 5133617..7322772 100644 --- a/test/images/simulated/ideal/C04_FE1.png +++ b/test/images/simulated/ideal/C04_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54d44ca459c22f74831ef027f68a632deda62e933e68e281964af1927459bbc3 -size 1360559 +oid sha256:f5964bd3102387e811a414f225f79e4de9253395b5e71c6118e73b207f055a3f +size 1207453 diff --git a/test/images/simulated/ideal/C04_FE2.png b/test/images/simulated/ideal/C04_FE2.png index ce752a5..3a23fed 100644 --- a/test/images/simulated/ideal/C04_FE2.png +++ b/test/images/simulated/ideal/C04_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53220e5e301ff3f7919b09c566d3cd2023be1d785a6042236b6c299067c5a419 -size 1448002 +oid sha256:b30f4153fcf3e609acb7ad25104deb229207ec911a340e836392a1d2266ed700 +size 1316420 diff --git a/test/images/simulated/ideal/C04_FE3.png b/test/images/simulated/ideal/C04_FE3.png index b5d8d60..cf4e873 100644 --- a/test/images/simulated/ideal/C04_FE3.png +++ b/test/images/simulated/ideal/C04_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8be2aed01f6d944b9f6fa03da10ceafdbd95e35362cc7a8cafefdcf0f32c3d3b -size 1420667 +oid sha256:27a50c95291a43bfd4fd1ead9c5c770cff1e6a12e866efef88eccd80c3914026 +size 1314098 diff --git a/test/images/simulated/ideal/C05_FA0.png b/test/images/simulated/ideal/C05_FA0.png index dba8494..e34f43d 100644 --- a/test/images/simulated/ideal/C05_FA0.png +++ b/test/images/simulated/ideal/C05_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbbf583755725843021c8cbe4dd190b0088b202dede13b4242ff1678832f8a75 -size 1423110 +oid sha256:ace6bd924eddb716cfe45a5f6b0283f369e93178b2011264891a603fa174a4ff +size 1233126 diff --git a/test/images/simulated/ideal/C05_FB1.png b/test/images/simulated/ideal/C05_FB1.png index 831385f..9c7393e 100644 --- 
a/test/images/simulated/ideal/C05_FB1.png +++ b/test/images/simulated/ideal/C05_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b1098d5fed30220fd2ab7b3a3cea9ada7106352944370d625eb646d69dfac61 -size 1464417 +oid sha256:04b2e333f07bf6d1b70ed29cfc6ea9a3474f0f521823113f67b2630cc6d433a5 +size 1279021 diff --git a/test/images/simulated/ideal/C05_FB2.png b/test/images/simulated/ideal/C05_FB2.png index 5974fc4..c5bebfd 100644 --- a/test/images/simulated/ideal/C05_FB2.png +++ b/test/images/simulated/ideal/C05_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27f6adb43d23e456a76835f321fb12fe904cdeddf9d4992cd21ed42f21df770c -size 1447836 +oid sha256:fc2e08483eb9a8669c3716d4784da2339449e99d3f350454f62e01cc2a7eed72 +size 1281794 diff --git a/test/images/simulated/ideal/C05_FB3.png b/test/images/simulated/ideal/C05_FB3.png index d3ac724..924d09d 100644 --- a/test/images/simulated/ideal/C05_FB3.png +++ b/test/images/simulated/ideal/C05_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3073546fc5b4684634175ec98076d2b2f7cadef10cd859964f4fbe01c43aadbe -size 1471802 +oid sha256:9e2b42600afc08d84c455f68826426dc44ce9ba2e1beaabeb215fba015d6b5d1 +size 1275044 diff --git a/test/images/simulated/ideal/C05_FB4.png b/test/images/simulated/ideal/C05_FB4.png index f00c811..693e6ca 100644 --- a/test/images/simulated/ideal/C05_FB4.png +++ b/test/images/simulated/ideal/C05_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7438fde2cd15be601fd0b47607d56fa74f91052888382a94657fd82b4e5afc6 -size 1447899 +oid sha256:139693e02cca3c936f6d0c4d40eb076bb1f307e3745491f8fa950e9d26074e48 +size 1284170 diff --git a/test/images/simulated/ideal/C05_FC1.png b/test/images/simulated/ideal/C05_FC1.png index a965fbe..274b93d 100644 --- a/test/images/simulated/ideal/C05_FC1.png +++ b/test/images/simulated/ideal/C05_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:233ec4ce71d77e35a41d7a00086be4a0dc86c72961099cb037adfb7436535180 -size 1424651 +oid sha256:b4997ee771be0b9253258780257a6795ef4fcbd8b3e40470c365331276ade2ac +size 1234950 diff --git a/test/images/simulated/ideal/C05_FC2.png b/test/images/simulated/ideal/C05_FC2.png index fa8cc6c..f2fe746 100644 --- a/test/images/simulated/ideal/C05_FC2.png +++ b/test/images/simulated/ideal/C05_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a77a57dda121f8a8efd70400341bcfa5fe214d367ee944e6e4bb555752b4f2a4 -size 1444030 +oid sha256:423ca9785dd24eb81eb51a15d1790570bbc25f58798fd8e32b47d3f0db390549 +size 1256728 diff --git a/test/images/simulated/ideal/C05_FC3.png b/test/images/simulated/ideal/C05_FC3.png index 6715c23..b1a3869 100644 --- a/test/images/simulated/ideal/C05_FC3.png +++ b/test/images/simulated/ideal/C05_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3830b1bd476319716fde166728f6c80ed8a956ec068206a732a06dd9192addb1 -size 1424731 +oid sha256:cdf218bfb18ef802c87c7277f9c12b8f94115caee7e5474c322c0b50c07e464a +size 1230402 diff --git a/test/images/simulated/ideal/C05_FC4.png b/test/images/simulated/ideal/C05_FC4.png index 20fbf76..23b453c 100644 --- a/test/images/simulated/ideal/C05_FC4.png +++ b/test/images/simulated/ideal/C05_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7436939d708079eab9894eb4d012b79b3af43e94f624d72925f11f8b81e8113 -size 1444359 +oid sha256:8428378e23bd243747f74ab55c3e78bb762562be724924f57e3761c8c48cf9a4 +size 1257375 diff --git 
a/test/images/simulated/ideal/C05_FD1.png b/test/images/simulated/ideal/C05_FD1.png index 25c151a..d1ba0f3 100644 --- a/test/images/simulated/ideal/C05_FD1.png +++ b/test/images/simulated/ideal/C05_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5d737c201d0c90417c5b2ce490a3a9f323108ac232bee4416a0435b1fd59515 -size 1382511 +oid sha256:c5ae976964212b95cbe005a9805f9d0ca1d189dedfdfc23e2297cd5b7e9405bb +size 1206791 diff --git a/test/images/simulated/ideal/C05_FD2.png b/test/images/simulated/ideal/C05_FD2.png index a35a3ed..02b5ecd 100644 --- a/test/images/simulated/ideal/C05_FD2.png +++ b/test/images/simulated/ideal/C05_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c300dbf4ce5ccfb6a8638a17167ef79da2bf1a65b69dd4a5065c9283ce312d32 -size 1465724 +oid sha256:7f6b4f8981d537b3ebf2ed106960de4d504b99bc1fc8954185d3347af3040151 +size 1306077 diff --git a/test/images/simulated/ideal/C05_FD3.png b/test/images/simulated/ideal/C05_FD3.png index f6b8e87..2dbf067 100644 --- a/test/images/simulated/ideal/C05_FD3.png +++ b/test/images/simulated/ideal/C05_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f41e05bfbd789f16f1447b53cbc6255685e75423bdeffadc3b092c74b777e9d2 -size 1383143 +oid sha256:f56196b324e531155d846d33354818f9eada8c9ea17d98ca6b78fdcde2505b5b +size 1205689 diff --git a/test/images/simulated/ideal/C05_FD4.png b/test/images/simulated/ideal/C05_FD4.png index 6d2f5bb..2feddbe 100644 --- a/test/images/simulated/ideal/C05_FD4.png +++ b/test/images/simulated/ideal/C05_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0bbd64484730d8f94d281bc6e70c417d7789f3a6306102b72cf8709398b3a48 -size 1465517 +oid sha256:4a9403a96a7ff9549aa671b590c6c3c8442ced86fb08bb91d2ae905ff7cd9184 +size 1302322 diff --git a/test/images/simulated/ideal/C05_FE1.png b/test/images/simulated/ideal/C05_FE1.png index ddec8cb..e7fc09a 100644 --- a/test/images/simulated/ideal/C05_FE1.png +++ b/test/images/simulated/ideal/C05_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eef2c6cfc6da51269c6e39416d5bd581f99c7a4575bb7f350f87d0f089007e2c -size 1385547 +oid sha256:aa86cadc666f7742e7cb5246beed8ba652a618fa198f554c60e128d03ac1eb14 +size 1203688 diff --git a/test/images/simulated/ideal/C05_FE2.png b/test/images/simulated/ideal/C05_FE2.png index 0b79678..14c6e18 100644 --- a/test/images/simulated/ideal/C05_FE2.png +++ b/test/images/simulated/ideal/C05_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:303eff0f2bf3650c800c2f2a64c9660441a62e2bd718232685ee6bedc750e3b9 -size 1400681 +oid sha256:2f80a2972b128ba8295600fe0ff62cb5c524b6d47509ec586fb7d836b3056988 +size 1273183 diff --git a/test/images/simulated/ideal/C05_FE3.png b/test/images/simulated/ideal/C05_FE3.png index 61302e4..deccaf4 100644 --- a/test/images/simulated/ideal/C05_FE3.png +++ b/test/images/simulated/ideal/C05_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf013242bec7ffeb3cefb38374b1fde2a5f3543d61e39f39c3cfbffadef5b042 -size 1372127 +oid sha256:82ea393fc378bd4d02c9c34410460c5a4b6d093d3aaedb2a6faabe36e6a8ccbc +size 1270786 diff --git a/test/images/simulated/ideal/C06_FA0.png b/test/images/simulated/ideal/C06_FA0.png index 32ca22c..b7c9188 100644 --- a/test/images/simulated/ideal/C06_FA0.png +++ b/test/images/simulated/ideal/C06_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ad5fffceef21d02fb152249a8d63436f1a0e1d541e11b67d2be32c233ad36e8 -size 
1406171 +oid sha256:e58fc7a300deab704a5dd8f7eb3fc4101ebd25bddc5e633f37345b35a33f54de +size 1208671 diff --git a/test/images/simulated/ideal/C06_FB1.png b/test/images/simulated/ideal/C06_FB1.png index 9234af2..6f2ea06 100644 --- a/test/images/simulated/ideal/C06_FB1.png +++ b/test/images/simulated/ideal/C06_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62e0acfa0f1017f830c70f4ece8993829c50a6cfdf82eb24f92e3e2120686d1d -size 1444707 +oid sha256:b0884d60581cb7cf20b4ac43e3e12112a5245e9708901c3b3f4aae0c021a5212 +size 1256113 diff --git a/test/images/simulated/ideal/C06_FB2.png b/test/images/simulated/ideal/C06_FB2.png index e166dce..6ad27b2 100644 --- a/test/images/simulated/ideal/C06_FB2.png +++ b/test/images/simulated/ideal/C06_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5ad4bc93e0dd7a2e1be50ebea809b1f343fd2069ff267aac2596275a4ee55d6 -size 1467964 +oid sha256:5d4297cdd15687617713d63da9a782973e1b88ebfd753e1ee5daa32043206528 +size 1289015 diff --git a/test/images/simulated/ideal/C06_FB3.png b/test/images/simulated/ideal/C06_FB3.png index b4e26a4..4299319 100644 --- a/test/images/simulated/ideal/C06_FB3.png +++ b/test/images/simulated/ideal/C06_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fc8ab5297a12583366278b562ddb2884a97316a39111f828841678f4e33dace -size 1450365 +oid sha256:bba781bd37866cd3232ead4d6e32a0e4e546cac4171e79c66bb265ecd798baa8 +size 1259215 diff --git a/test/images/simulated/ideal/C06_FB4.png b/test/images/simulated/ideal/C06_FB4.png index 4a83751..aca4997 100644 --- a/test/images/simulated/ideal/C06_FB4.png +++ b/test/images/simulated/ideal/C06_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:879a6bf125418eb369c20d4a2ac45cb4e2cd1838d365c47daf4a6eb4663c0363 -size 1469098 +oid sha256:164e0f7197c513515f222f04f87e3d30133c0867eb4b7518c52906b2c03cbf97 +size 1299267 diff --git a/test/images/simulated/ideal/C06_FC1.png b/test/images/simulated/ideal/C06_FC1.png index 0489b9c..837fc18 100644 --- a/test/images/simulated/ideal/C06_FC1.png +++ b/test/images/simulated/ideal/C06_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3789dcd9c1dcfe07460be57210cd79800326807f58d18b011586630947bc7c5b -size 1434857 +oid sha256:e345ec13535a775fc01aba6a6b0d9e19b4cab67684d9a900362da4e5a78604ce +size 1212068 diff --git a/test/images/simulated/ideal/C06_FC2.png b/test/images/simulated/ideal/C06_FC2.png index 28b87be..754bd82 100644 --- a/test/images/simulated/ideal/C06_FC2.png +++ b/test/images/simulated/ideal/C06_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4ca1a3548c2b5ed860dbea8b0751b32fae9b9857bca50b74d88e88251484762 -size 1415940 +oid sha256:4155addc9fa8934707f9697a1cc74bdb21a7688a713bcecad6b4976e1ad53588 +size 1199468 diff --git a/test/images/simulated/ideal/C06_FC3.png b/test/images/simulated/ideal/C06_FC3.png index feb6072..250ea8b 100644 --- a/test/images/simulated/ideal/C06_FC3.png +++ b/test/images/simulated/ideal/C06_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:304babaccab1b59052d550370497331bcde7cabb97e8e2d6893ccc5581aab64e -size 1433852 +oid sha256:72194fc00184d87b6d5abc4f5cbbe1ccb1c8d17d1e01476fc41cf9f5ebed0707 +size 1211910 diff --git a/test/images/simulated/ideal/C06_FC4.png b/test/images/simulated/ideal/C06_FC4.png index 7b7594c..21bba1c 100644 --- a/test/images/simulated/ideal/C06_FC4.png +++ b/test/images/simulated/ideal/C06_FC4.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:251a260f4c9bb1b0bfeb36dfef530662783f54f83fb758ed840c19bf6c05ea4f -size 1415818 +oid sha256:515ee3f6b56a29ac3de1cbceac2adf1eebf9676ef6f35e66ae81e677dd8037cc +size 1204429 diff --git a/test/images/simulated/ideal/C06_FD1.png b/test/images/simulated/ideal/C06_FD1.png index 3a0977d..9e50e0b 100644 --- a/test/images/simulated/ideal/C06_FD1.png +++ b/test/images/simulated/ideal/C06_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54c3c0192c4511bfb80f7cdf8736cfa12200d97e46ff2a9c7ed98a50f118333d -size 1451613 +oid sha256:aa74f68a12524f544858d0634b3892b59df2275ec71bbe0342479bf1ddb99aa5 +size 1283711 diff --git a/test/images/simulated/ideal/C06_FD2.png b/test/images/simulated/ideal/C06_FD2.png index b0b20d5..e873654 100644 --- a/test/images/simulated/ideal/C06_FD2.png +++ b/test/images/simulated/ideal/C06_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:794ba5135469a967b582018d6406fca47251c7c729baa53de3afa3c8f5bc545c -size 1390968 +oid sha256:c03d76b2142bd8e9e37a763ead3090d49e5c88d0fb14abe8a518ff11cbc8b32f +size 1180193 diff --git a/test/images/simulated/ideal/C06_FD3.png b/test/images/simulated/ideal/C06_FD3.png index ff8f6e5..db22f61 100644 --- a/test/images/simulated/ideal/C06_FD3.png +++ b/test/images/simulated/ideal/C06_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1bffd01381cc75aa7624cf690c5275f8ee9b96cb5b906d4faef0746f009069b -size 1457586 +oid sha256:bdcd3504c36f1cab7945209219fc41513b1f76b213931af1964c0a018f0971d4 +size 1283701 diff --git a/test/images/simulated/ideal/C06_FD4.png b/test/images/simulated/ideal/C06_FD4.png index 3b6e1a8..e96ae39 100644 --- a/test/images/simulated/ideal/C06_FD4.png +++ b/test/images/simulated/ideal/C06_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6b31f3997769e9c3435950cc00694815689dad6da2310bba93b7bc96cc8b0c0 -size 1392008 +oid sha256:56b9ca69503cfa481deddf2e8670eddb757be47b3d5a678c6663d4d598f685f5 +size 1193130 diff --git a/test/images/simulated/ideal/C06_FE1.png b/test/images/simulated/ideal/C06_FE1.png index aa57df8..a204a9d 100644 --- a/test/images/simulated/ideal/C06_FE1.png +++ b/test/images/simulated/ideal/C06_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c61f7ad9caf37e732a61acee50f01c19140146135ec7205d75e5ad037fa2629 -size 1406496 +oid sha256:dde0332c757efbec7d07fd047e323148d489080c12913f64336031b73ce4c378 +size 1268617 diff --git a/test/images/simulated/ideal/C06_FE2.png b/test/images/simulated/ideal/C06_FE2.png index 4679ced..fe9f747 100644 --- a/test/images/simulated/ideal/C06_FE2.png +++ b/test/images/simulated/ideal/C06_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ba27216320e72a1e4a88be72cabab6c8749b07285228b183a9cf1265f631ad4 -size 1412298 +oid sha256:a1ead6d1808c8b9d5fab45bd9540c552a8347146df39ba1eca9f81451ac1f211 +size 1235108 diff --git a/test/images/simulated/ideal/C06_FE3.png b/test/images/simulated/ideal/C06_FE3.png index ba83c7f..8ca6443 100644 --- a/test/images/simulated/ideal/C06_FE3.png +++ b/test/images/simulated/ideal/C06_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abd8705796db2f3616fbe87e3bb58f375ea55e78e7e0c2eb55a924c6d68d525f -size 1388445 +oid sha256:b6c5832b289e03c37c154c1b8db2510ccc67863e690099e60a26d30b94f62c0f +size 1182082 diff --git a/test/images/simulated/ideal/C07_FA0.png b/test/images/simulated/ideal/C07_FA0.png index 6f948e3..e6fc719 100644 --- 
a/test/images/simulated/ideal/C07_FA0.png +++ b/test/images/simulated/ideal/C07_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dacb48879077048a21948605351636728d93844aaaaeaae239cdd0144fc0f066 -size 1455637 +oid sha256:42f3f895971d23b5f6dc33540662c2da0efea8951131ee26844b91c742f15bc2 +size 1284751 diff --git a/test/images/simulated/ideal/C07_FB1.png b/test/images/simulated/ideal/C07_FB1.png index e274a8c..126c02e 100644 --- a/test/images/simulated/ideal/C07_FB1.png +++ b/test/images/simulated/ideal/C07_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ed4a6fcbff63345cc4a75ef7ac64d0aeb6e9581a5f3ce6555e3c07a17a411c8 -size 1441441 +oid sha256:132e947c7b0add4ff087419ebb81ab39b21c8306475ebc91859c5b8a7b5c31a4 +size 1252320 diff --git a/test/images/simulated/ideal/C07_FB2.png b/test/images/simulated/ideal/C07_FB2.png index ca1c83a..8529647 100644 --- a/test/images/simulated/ideal/C07_FB2.png +++ b/test/images/simulated/ideal/C07_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9340b6c0d31abfdb8be3021e5aa933c63b64cb0e7236f7429462bd16fde89b33 -size 1457934 +oid sha256:b10e2084add858f308dbe3001e2b1845efb8758b376603eeecac966c6fdfc1d7 +size 1288518 diff --git a/test/images/simulated/ideal/C07_FB3.png b/test/images/simulated/ideal/C07_FB3.png index b0729ae..dc77a77 100644 --- a/test/images/simulated/ideal/C07_FB3.png +++ b/test/images/simulated/ideal/C07_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f46cb7f327581701870499be6a33c4593703aa469ee7dece6316e8d11979a36a -size 1408004 +oid sha256:3cba23a291e20ab5bde9cedd4560f25fedb0d98b6afa9d18b635b81dd5aa233a +size 1232176 diff --git a/test/images/simulated/ideal/C07_FB4.png b/test/images/simulated/ideal/C07_FB4.png index 27af6fd..f2f35f9 100644 --- a/test/images/simulated/ideal/C07_FB4.png +++ b/test/images/simulated/ideal/C07_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fb3b1eedc401d2827a1389d28767463bb773de08ba10ab470e1b574d8689cae -size 1455624 +oid sha256:56c87b58a7cd45bf0a3f00d58129a086a0b63eae1c84454457ddc0117bc61f8a +size 1279125 diff --git a/test/images/simulated/ideal/C07_FC1.png b/test/images/simulated/ideal/C07_FC1.png index f9eb8c2..eaa5ea9 100644 --- a/test/images/simulated/ideal/C07_FC1.png +++ b/test/images/simulated/ideal/C07_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ffbc10bbbf556e398b26283855ffd8f215f84aa37ff96cc20f42921c314ae2a -size 1418109 +oid sha256:e9f8c81e8998f871eab031fdbac2f417f9cc72f57ba5d7a7c9fd22944a8927da +size 1194565 diff --git a/test/images/simulated/ideal/C07_FC2.png b/test/images/simulated/ideal/C07_FC2.png index 215b817..d3c5b24 100644 --- a/test/images/simulated/ideal/C07_FC2.png +++ b/test/images/simulated/ideal/C07_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef1b3f7e3006bb214e79c9fb89e5dce1376b2d0ec209f7694f2d7c97e23e2ed4 -size 1429889 +oid sha256:51987d6d42597be1523b61d77fb3fed3fead1268fa38fe2c4bd35da4567d9bbb +size 1203638 diff --git a/test/images/simulated/ideal/C07_FC3.png b/test/images/simulated/ideal/C07_FC3.png index 19e2e39..f6d3db8 100644 --- a/test/images/simulated/ideal/C07_FC3.png +++ b/test/images/simulated/ideal/C07_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a971672d74eeeb2feb2ffb81e24a0933d1f414a7768b1a0e2a98af8e36ed39c -size 1416908 +oid sha256:de0f0b898753ffea8640e6d1243493a1647c776ffc8668f6e63c0f979cfcabf4 +size 1196855 diff --git 
a/test/images/simulated/ideal/C07_FC4.png b/test/images/simulated/ideal/C07_FC4.png index d1e10df..628db69 100644 --- a/test/images/simulated/ideal/C07_FC4.png +++ b/test/images/simulated/ideal/C07_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ed7296fea9065f2c0a1a542355e2f14a07e7a0c9918fa23c997e1f65bfa4806 -size 1408305 +oid sha256:dd7d7dca4b30106257d5d34b82de634a086f72b5df3e7c9b1daf2056c26182a0 +size 1194578 diff --git a/test/images/simulated/ideal/C07_FD1.png b/test/images/simulated/ideal/C07_FD1.png index 3507d38..55dbe18 100644 --- a/test/images/simulated/ideal/C07_FD1.png +++ b/test/images/simulated/ideal/C07_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b68923b96f5e88c82e984df5538100ecf58b10c30ef79d1de24b6d7fea477c7 -size 1347988 +oid sha256:c87bd067d5158728bba9c213c7b5ea85a5afa44dc4a5d422028166aa65878bb7 +size 1179444 diff --git a/test/images/simulated/ideal/C07_FD2.png b/test/images/simulated/ideal/C07_FD2.png index c4b8c68..65516b5 100644 --- a/test/images/simulated/ideal/C07_FD2.png +++ b/test/images/simulated/ideal/C07_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1b4ae9bfb8f1856ca32ae3702aa72199cbd274b015fdde038f00e120eaec133 -size 1451198 +oid sha256:8cc9d9e79c70179ddfa240f17d476ef3b081f25521508f5ca0c13f69408063ef +size 1267416 diff --git a/test/images/simulated/ideal/C07_FD3.png b/test/images/simulated/ideal/C07_FD3.png index 430ec16..d517c81 100644 --- a/test/images/simulated/ideal/C07_FD3.png +++ b/test/images/simulated/ideal/C07_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a39770d23186f675fa9129f20025e524d6b1c7d5e3836d64020a6b8948390fa -size 1462287 +oid sha256:84f3e408e0648048783c748efffa74c49a201139e3bca64a855f030e90c4f13c +size 1274955 diff --git a/test/images/simulated/ideal/C07_FD4.png b/test/images/simulated/ideal/C07_FD4.png index 40b57fe..a8d7c8a 100644 --- a/test/images/simulated/ideal/C07_FD4.png +++ b/test/images/simulated/ideal/C07_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71989ae5355a9df1fda7e164eb1b840c53fd624f36c9e2095afa1bdd66115861 -size 1359418 +oid sha256:3f627215ec71537d06f891b9b03db6c57bb50a940a0441e97ef3d0009dd82d4f +size 1170489 diff --git a/test/images/simulated/ideal/C07_FE1.png b/test/images/simulated/ideal/C07_FE1.png index 63596e5..de54c0e 100644 --- a/test/images/simulated/ideal/C07_FE1.png +++ b/test/images/simulated/ideal/C07_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f552e00d27c043378904fdfaf6dcb94b8991a374771b991794328a57ba59e912 -size 1456098 +oid sha256:cbccde6b1245e197359f930b5b905c0b7b6b2df2950e0a73e32049e0ab622cc4 +size 1305414 diff --git a/test/images/simulated/ideal/C07_FE2.png b/test/images/simulated/ideal/C07_FE2.png index 7835755..ea1c149 100644 --- a/test/images/simulated/ideal/C07_FE2.png +++ b/test/images/simulated/ideal/C07_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:904bb2fe73694beecd372df9fb8beb6b8857aac06e4dc49819c1ae36c38f26f1 -size 1470780 +oid sha256:260e3065666ee2b5d3835dcf086547c906dd9174e671f987dcb3cac3780b5cc6 +size 1299195 diff --git a/test/images/simulated/ideal/C07_FE3.png b/test/images/simulated/ideal/C07_FE3.png index 9238501..359b8cf 100644 --- a/test/images/simulated/ideal/C07_FE3.png +++ b/test/images/simulated/ideal/C07_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c56ac97149e481139993f8153cecdef5d9056da5e3d19beee056566c50d176c -size 
1382668 +oid sha256:7e1e3dd84c9e8925cf8a2a28730cf35214ed7746b262d778e60d7634881f3363 +size 1185807 diff --git a/test/images/simulated/ideal/C08_FA0.png b/test/images/simulated/ideal/C08_FA0.png index ac8685e..04d5d0d 100644 --- a/test/images/simulated/ideal/C08_FA0.png +++ b/test/images/simulated/ideal/C08_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c81fb4e0ce9e02eb8f51edc8dd51e68056b3bbfbff79aa868e1f02bd7ff31b7 -size 1406754 +oid sha256:44350a09cea2e7ac972d01a295185ad33b1b89c82f06eacfda61ceafcc21c019 +size 1207331 diff --git a/test/images/simulated/ideal/C08_FB1.png b/test/images/simulated/ideal/C08_FB1.png index e5231e8..fe5be1e 100644 --- a/test/images/simulated/ideal/C08_FB1.png +++ b/test/images/simulated/ideal/C08_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8298e295fe9fa7f532132be667b18c878257fbe337688c74b1dcdf45f612c2ac -size 1456102 +oid sha256:3c56a844a48ceacbf41e07d1c4f1e2ff6e786090ff12b2c66d1ee38559676bf9 +size 1278628 diff --git a/test/images/simulated/ideal/C08_FB2.png b/test/images/simulated/ideal/C08_FB2.png index d355634..910724b 100644 --- a/test/images/simulated/ideal/C08_FB2.png +++ b/test/images/simulated/ideal/C08_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96c3f7663eba411177b4baf47e341436da590c812eb4feef3862a731161b9dc8 -size 1456738 +oid sha256:93cba4618e1727ff75d44d400d0a1fc3717558078cd11b07f9cc6156bdbb3cf1 +size 1272782 diff --git a/test/images/simulated/ideal/C08_FB3.png b/test/images/simulated/ideal/C08_FB3.png index 1dae337..37ed699 100644 --- a/test/images/simulated/ideal/C08_FB3.png +++ b/test/images/simulated/ideal/C08_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24977cdabee932e1b8d4fb764dbd0fc1ac08abf8fce7a9db52559410aa9cf82d -size 1469920 +oid sha256:302d271d44177e890c27c1715fa21e113b0640d6369e0cd071bd4ee799ef4c5e +size 1283928 diff --git a/test/images/simulated/ideal/C08_FB4.png b/test/images/simulated/ideal/C08_FB4.png index b19eb3f..bd04d07 100644 --- a/test/images/simulated/ideal/C08_FB4.png +++ b/test/images/simulated/ideal/C08_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:befc031032a566365352ed5526ae347274c1b461c770ef90faf6039ec8398367 -size 1465215 +oid sha256:ae4b9eae30a5cab4c3cb07595eda9625f64eabdd78ebc73b3a81e4cd133a7cf3 +size 1282322 diff --git a/test/images/simulated/ideal/C08_FC1.png b/test/images/simulated/ideal/C08_FC1.png index 2a8822d..29180cc 100644 --- a/test/images/simulated/ideal/C08_FC1.png +++ b/test/images/simulated/ideal/C08_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9b9a15d49e70dbe03c69eff09ecce5be6dfe6424a054e33f708a8e029322ee0 -size 1434880 +oid sha256:cba2ef2e114f8fc9e097243382318bcc247ac9b72244cedfb990cbcc512fb8af +size 1212558 diff --git a/test/images/simulated/ideal/C08_FC2.png b/test/images/simulated/ideal/C08_FC2.png index d80bd5d..c37aa3c 100644 --- a/test/images/simulated/ideal/C08_FC2.png +++ b/test/images/simulated/ideal/C08_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ce1309c902db34353438ab810b28277df7a850596a297d23f1521d8a3795acb -size 1437551 +oid sha256:090e874012b2a1edc5857ea4b78f0774c8e50afee75cc6d771738e5db807634d +size 1212804 diff --git a/test/images/simulated/ideal/C08_FC3.png b/test/images/simulated/ideal/C08_FC3.png index e29538e..757ea35 100644 --- a/test/images/simulated/ideal/C08_FC3.png +++ b/test/images/simulated/ideal/C08_FC3.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:1567f6771d92fe4e8772879825fa11226c37d36e124c9a2d68a8549ca08ad0fd -size 1411023 +oid sha256:73522150153ea3f518514e513a4dd5ed47c51c4e7818eb480de1e51a897c0efa +size 1192940 diff --git a/test/images/simulated/ideal/C08_FC4.png b/test/images/simulated/ideal/C08_FC4.png index d25eace..dcb84cf 100644 --- a/test/images/simulated/ideal/C08_FC4.png +++ b/test/images/simulated/ideal/C08_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb9bbe6cd68a56acf594c24040942c02f94b065e13d3743b8f23bada9fce82ea -size 1410493 +oid sha256:7baae4728a077a7dcd0a22c1e5bd3e290fceffbb141217654cbc350de2e2328f +size 1192237 diff --git a/test/images/simulated/ideal/C08_FD1.png b/test/images/simulated/ideal/C08_FD1.png index 8c929c3..2e1422d 100644 --- a/test/images/simulated/ideal/C08_FD1.png +++ b/test/images/simulated/ideal/C08_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac9c13b67c4d88add9284d004518d84e2c6d34d838590d0c411d40f022c65482 -size 1457331 +oid sha256:85cd548f64c8eed0349774f83d0c9c4c9d3d2e5d79606411595fcaad73b8f1c0 +size 1282507 diff --git a/test/images/simulated/ideal/C08_FD2.png b/test/images/simulated/ideal/C08_FD2.png index 5fc7d66..de3c822 100644 --- a/test/images/simulated/ideal/C08_FD2.png +++ b/test/images/simulated/ideal/C08_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66e3ae7a85cab435b377a96a93c94161519a63da4ad51d523f03f0f1aebf04d6 -size 1451626 +oid sha256:10dadff94993f9d607a535dfabbe6ee1ffa47f40b6f3cb2d3512e76f7a847b2d +size 1283288 diff --git a/test/images/simulated/ideal/C08_FD3.png b/test/images/simulated/ideal/C08_FD3.png index aec2405..02f7c38 100644 --- a/test/images/simulated/ideal/C08_FD3.png +++ b/test/images/simulated/ideal/C08_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8dfd8e7675cdbe1fed4d535e9b50fe673ea9324a60bcd2ac3793879a1b01173 -size 1392683 +oid sha256:ec09e6e06acfc0474cd8311da3f956e730489ebcff0fa639548ffd72f9811f79 +size 1193213 diff --git a/test/images/simulated/ideal/C08_FD4.png b/test/images/simulated/ideal/C08_FD4.png index 4754820..4446be7 100644 --- a/test/images/simulated/ideal/C08_FD4.png +++ b/test/images/simulated/ideal/C08_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:210554b1f781ea28d5c9193a04aeaa0a059ed37eb84fb986d9d20c66d952fbd3 -size 1388830 +oid sha256:f2c856395a0923e0d29d3500931137bbc3e2caae0a48cd899f68938b4376e238 +size 1179135 diff --git a/test/images/simulated/ideal/C08_FE1.png b/test/images/simulated/ideal/C08_FE1.png index d91cb9b..35231c0 100644 --- a/test/images/simulated/ideal/C08_FE1.png +++ b/test/images/simulated/ideal/C08_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7929a01499f7a0b6e41c19d4cb5c0c9467b46f0bdae2fc9b7590ad9fea58e98 -size 1453082 +oid sha256:6ca3d9a6773df5d18431610146bcc6c3c2017ae4253da3a6bd508a178b0807de +size 1291841 diff --git a/test/images/simulated/ideal/C08_FE2.png b/test/images/simulated/ideal/C08_FE2.png index fa381b3..18cb47b 100644 --- a/test/images/simulated/ideal/C08_FE2.png +++ b/test/images/simulated/ideal/C08_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f7baaea88b3d6c4cb8fe880b8fa9c888ba5548a85e7b68a8c1da39d5343636c -size 1330413 +oid sha256:5ddccec14ecfb07d481a58372adef25932ff7d7e2f4cc75ef89d1167d39ab532 +size 1177217 diff --git a/test/images/simulated/ideal/C08_FE3.png b/test/images/simulated/ideal/C08_FE3.png index bff8971..71abb5a 100644 --- 
a/test/images/simulated/ideal/C08_FE3.png +++ b/test/images/simulated/ideal/C08_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20f1457f88997800a7e8f0eba78b5d7e08b6affe2bf4132b0b1caff23beb3867 -size 1466288 +oid sha256:5daf43772b05b759370133db665934d2cff98aea6775c5e588c4a15e14032887 +size 1274898 diff --git a/test/images/simulated/ideal/C09_FA0.png b/test/images/simulated/ideal/C09_FA0.png index d556b93..9687259 100644 --- a/test/images/simulated/ideal/C09_FA0.png +++ b/test/images/simulated/ideal/C09_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6aef88bced90e03fb41912fbdfe976f44e9e10e9e1b6924b907b963e709aa709 -size 1454499 +oid sha256:072a17790997ac45ab4c8c2f38a7f2fc2bd53e50fbfce6f111b0cfd3aadf3120 +size 1281445 diff --git a/test/images/simulated/ideal/C09_FB1.png b/test/images/simulated/ideal/C09_FB1.png index f4d1ab1..b1f697a 100644 --- a/test/images/simulated/ideal/C09_FB1.png +++ b/test/images/simulated/ideal/C09_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2cd8c3027779cf0900f8bb0f635c531167fe42e65671c7eb15c277e9f17ed4c7 -size 1470716 +oid sha256:4f86a187ea2bff9bbf93904280d139a18f9c66562c0a5456a4a64bed70c15785 +size 1298470 diff --git a/test/images/simulated/ideal/C09_FB2.png b/test/images/simulated/ideal/C09_FB2.png index 7602b07..18baf8f 100644 --- a/test/images/simulated/ideal/C09_FB2.png +++ b/test/images/simulated/ideal/C09_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bdb28e5630239b58ca76a756633c279e2d7ca7c3bc30a76feffd93ecd1a6324 -size 1434806 +oid sha256:94ff27dc9d8eb125f3916d5930a2bc12b17a4f83014732095cdb92163e44394d +size 1247026 diff --git a/test/images/simulated/ideal/C09_FB3.png b/test/images/simulated/ideal/C09_FB3.png index ab3b0b7..8167855 100644 --- a/test/images/simulated/ideal/C09_FB3.png +++ b/test/images/simulated/ideal/C09_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5bcbf81ee87d4c76e45dc66f5a9832fb2f2ba33faf2e78ef0233311c85a94a7 -size 1452240 +oid sha256:d5e13c48ae34f5d398ad08d6961498e74f3a47cbaa3a9f45681ce8b939c27771 +size 1275944 diff --git a/test/images/simulated/ideal/C09_FB4.png b/test/images/simulated/ideal/C09_FB4.png index 30bdbbf..657c0d8 100644 --- a/test/images/simulated/ideal/C09_FB4.png +++ b/test/images/simulated/ideal/C09_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59544d44495c6551e086d3946f1eadb35c4874b626f0999f99c799fc2812c2b1 -size 1411411 +oid sha256:86bc91442e4f660402a364e78da3d139532a74fcae54d61c3cf4c10c522f6e9a +size 1244595 diff --git a/test/images/simulated/ideal/C09_FC1.png b/test/images/simulated/ideal/C09_FC1.png index 2935f74..81f81d7 100644 --- a/test/images/simulated/ideal/C09_FC1.png +++ b/test/images/simulated/ideal/C09_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0a441f28cd0e901d78aeb677cb5d7cf0e7867666de17f211d4e9cb84104235b -size 1425882 +oid sha256:28470512c52a82dd746c94ee8da1c7b8319e55a8d257880694b1d00e51f291ca +size 1198124 diff --git a/test/images/simulated/ideal/C09_FC2.png b/test/images/simulated/ideal/C09_FC2.png index cd5ee43..c850cb9 100644 --- a/test/images/simulated/ideal/C09_FC2.png +++ b/test/images/simulated/ideal/C09_FC2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:471cc1bcaf0d82dcd8894ccbeba081da1074a0040295749cd42c03698eb9734e -size 1415622 +oid sha256:1147802f007378d31d20d3ec358699976af3d4fe165eb5c7ee1f24d53bd32dd8 +size 1192527 diff --git 
a/test/images/simulated/ideal/C09_FC3.png b/test/images/simulated/ideal/C09_FC3.png index 185ef81..895b972 100644 --- a/test/images/simulated/ideal/C09_FC3.png +++ b/test/images/simulated/ideal/C09_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51f5b09c18152a8ae5e477880cc8400be7aa7c0510506156095e65abb739a07d -size 1407173 +oid sha256:10c56359b630c8eb265c246c70316bdbe358c43ed5b2ac2977a570c7084c748d +size 1192540 diff --git a/test/images/simulated/ideal/C09_FC4.png b/test/images/simulated/ideal/C09_FC4.png index 779635a..210f082 100644 --- a/test/images/simulated/ideal/C09_FC4.png +++ b/test/images/simulated/ideal/C09_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31753feb1ac18f05b302cb00841c4c10984afa6b489fa0c64bdc47ba67a84c4b -size 1413932 +oid sha256:232281e6ed6400e7c1ed9a22b2d12f4a5b50562f448f1dff6d4d1f89c17c92d7 +size 1195078 diff --git a/test/images/simulated/ideal/C09_FD1.png b/test/images/simulated/ideal/C09_FD1.png index 142dff0..918b9d5 100644 --- a/test/images/simulated/ideal/C09_FD1.png +++ b/test/images/simulated/ideal/C09_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c9f0b32ab060eedc28eb6129016ad277579de399e1353350a55a529ea2788ec -size 1460547 +oid sha256:151a89efe82bb32bdf743f8457415de6c75fc3c135b1f227e2d6fa5d4b602bb3 +size 1273026 diff --git a/test/images/simulated/ideal/C09_FD2.png b/test/images/simulated/ideal/C09_FD2.png index c366f8c..a2e77fc 100644 --- a/test/images/simulated/ideal/C09_FD2.png +++ b/test/images/simulated/ideal/C09_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56f58d9c3b06c9d1eaba2a0ac0e44b82e3542fd67fc18d1a0888554ae020a79d -size 1347209 +oid sha256:70ac100516e78ec21daf28b38ad555b20d7f9a27804cb31ace0a1ec7ed089df9 +size 1176896 diff --git a/test/images/simulated/ideal/C09_FD3.png b/test/images/simulated/ideal/C09_FD3.png index 6b218b7..77d3711 100644 --- a/test/images/simulated/ideal/C09_FD3.png +++ b/test/images/simulated/ideal/C09_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75f1ab0a4e5ff4b332c54a042e0e0dc17d89ed47dc3d87baea90246aa11a3096 -size 1359330 +oid sha256:c2d402401c505925efa8af0682eddf5777a7324bbf07ca947ed3fc7c7786f9b2 +size 1168861 diff --git a/test/images/simulated/ideal/C09_FD4.png b/test/images/simulated/ideal/C09_FD4.png index c194e2a..cb3e8fd 100644 --- a/test/images/simulated/ideal/C09_FD4.png +++ b/test/images/simulated/ideal/C09_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62c15b2ab308b105358f32c994f68ae198b89965964450767a0498ebb869bc03 -size 1449371 +oid sha256:2decd304560a0b0fd03c9eb193737993dc33d5665a740cdea5c70ebb208cb701 +size 1264227 diff --git a/test/images/simulated/ideal/C09_FE1.png b/test/images/simulated/ideal/C09_FE1.png index 78aceee..b5fc27d 100644 --- a/test/images/simulated/ideal/C09_FE1.png +++ b/test/images/simulated/ideal/C09_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e179f3870581d079b6a2e95f075fcff7675d344dbd91807c2bb9c8e073b1dff8 -size 1384540 +oid sha256:1cf62ca13e6a828f2b532ca5604d97c69f4407b413146831088b830bd3b7bc70 +size 1188121 diff --git a/test/images/simulated/ideal/C09_FE2.png b/test/images/simulated/ideal/C09_FE2.png index edbd14d..e860322 100644 --- a/test/images/simulated/ideal/C09_FE2.png +++ b/test/images/simulated/ideal/C09_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8f2f765e68bd862f0d12f04238c0be571d02e1d2fbff11660bc443c10444e64 -size 
1464999 +oid sha256:a8daa8d3cbd03337099263138aaaae0d58ecac98507f01003017509609c552cf +size 1292796 diff --git a/test/images/simulated/ideal/C09_FE3.png b/test/images/simulated/ideal/C09_FE3.png index 0b9fb0a..f21bd65 100644 --- a/test/images/simulated/ideal/C09_FE3.png +++ b/test/images/simulated/ideal/C09_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2679319695762e3768b79cce49a32ef8c6eceed31f21c49712a7d6d6bae297d4 -size 1453128 +oid sha256:b4ca94f00b54c45722669a77ff64e1acf7da3e7feb96d2d37ced42cf923526cc +size 1307856 diff --git a/test/images/simulated/ideal/C10_FA0.png b/test/images/simulated/ideal/C10_FA0.png index 6cdf17c..d831a0d 100644 --- a/test/images/simulated/ideal/C10_FA0.png +++ b/test/images/simulated/ideal/C10_FA0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:436bf777b12596f68deeb0234fd7339801bc470b9078bc1b36670354a6ec6574 -size 1407422 +oid sha256:d9ea2b453c981c3513872dca52650e7c1e90102ed00f58b585034b4e99b0669d +size 1210237 diff --git a/test/images/simulated/ideal/C10_FB1.png b/test/images/simulated/ideal/C10_FB1.png index afd5658..7fb8cee 100644 --- a/test/images/simulated/ideal/C10_FB1.png +++ b/test/images/simulated/ideal/C10_FB1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8410068983f2f043c9821e61113c521ca855fb7abee255faa6843ccbfdb44dc7 -size 1469827 +oid sha256:7e36266dc2b3c1a358e846a3174bd847039ab63c86eb4de38d40102e1bde6f70 +size 1300095 diff --git a/test/images/simulated/ideal/C10_FB2.png b/test/images/simulated/ideal/C10_FB2.png index 3474d4d..acaeedb 100644 --- a/test/images/simulated/ideal/C10_FB2.png +++ b/test/images/simulated/ideal/C10_FB2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a22b6aa3297aba3a37c8fed8ac14e85e4afbc4820b6c813b531d01cc65a8862 -size 1451378 +oid sha256:d9203c027aaba16532690f39a6ee519cd8af68f40a877d300f7f82de64f8861e +size 1260990 diff --git a/test/images/simulated/ideal/C10_FB3.png b/test/images/simulated/ideal/C10_FB3.png index f5e933b..7acb646 100644 --- a/test/images/simulated/ideal/C10_FB3.png +++ b/test/images/simulated/ideal/C10_FB3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:542021d9d943d556138b81b51c81eeaf14af38cc5644d27bd56799f28d53a8cf -size 1469453 +oid sha256:cfa87b15eb137ec661c9e5f7c3ede9869e5aca44276abf596e4d8e8314edc950 +size 1289728 diff --git a/test/images/simulated/ideal/C10_FB4.png b/test/images/simulated/ideal/C10_FB4.png index f42ce30..b40f68c 100644 --- a/test/images/simulated/ideal/C10_FB4.png +++ b/test/images/simulated/ideal/C10_FB4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51f96973947502a894084dde80e42455ec3e446b47075ed3281bee4a83553f03 -size 1444926 +oid sha256:0e2f392082ce341d1e0b2d4c5dc9bdfb51f0d5680fc876026ce56b847a9924d4 +size 1258300 diff --git a/test/images/simulated/ideal/C10_FC1.png b/test/images/simulated/ideal/C10_FC1.png index 4f86af4..3d3dcb6 100644 --- a/test/images/simulated/ideal/C10_FC1.png +++ b/test/images/simulated/ideal/C10_FC1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:daf90b7cc95f51f6b69dadcc9861d5756f2cc4ba73850bf4ec06bd9e95fe068b -size 1416813 +oid sha256:6a2dda3c318f5e4f34c6bd4cb90f2afee654e4a6f86f0ddf0d796e668c3dba96 +size 1205711 diff --git a/test/images/simulated/ideal/C10_FC2.png b/test/images/simulated/ideal/C10_FC2.png index cc406d2..c840eee 100644 --- a/test/images/simulated/ideal/C10_FC2.png +++ b/test/images/simulated/ideal/C10_FC2.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:a7e9b31ddd50c58bcfcb51e92b76b468984d22f7af9d5b4295226ffd9a61892b -size 1435777 +oid sha256:9e7637df71efdbd8ad5607c64461486d9e59c348f03fd1fd5c22db0db760cd5c +size 1211787 diff --git a/test/images/simulated/ideal/C10_FC3.png b/test/images/simulated/ideal/C10_FC3.png index 29a7b82..f118206 100644 --- a/test/images/simulated/ideal/C10_FC3.png +++ b/test/images/simulated/ideal/C10_FC3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2650ee3240f42f18f9fb34d3fde270c88970a16d5e2146209d7a064d78e224c0 -size 1417079 +oid sha256:48d43d3d550bc5b0384c9fda972b8e51c3aaece8e2c55aaf83d8b1cee6e66b65 +size 1201509 diff --git a/test/images/simulated/ideal/C10_FC4.png b/test/images/simulated/ideal/C10_FC4.png index ca2515a..3cdb70c 100644 --- a/test/images/simulated/ideal/C10_FC4.png +++ b/test/images/simulated/ideal/C10_FC4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d64feb8705e54c7daf8ac9d7dcc05dda3250b37c2d3212f4bc243a5e544b104a -size 1434182 +oid sha256:3b3e54546a0ae6559159beb81b8efbd8ba6643329e3370a18e69773f67446ed7 +size 1212204 diff --git a/test/images/simulated/ideal/C10_FD1.png b/test/images/simulated/ideal/C10_FD1.png index e103ff0..4a4be24 100644 --- a/test/images/simulated/ideal/C10_FD1.png +++ b/test/images/simulated/ideal/C10_FD1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:285f70db82065551d8c1f4e778940f1f1076d08e8dfedf59e7945ee742f619d5 -size 1394497 +oid sha256:728e1a397ed745e29fe5e410b160dfbaad1a5f6b8b60b7cd14dbfe93285f11bc +size 1195040 diff --git a/test/images/simulated/ideal/C10_FD2.png b/test/images/simulated/ideal/C10_FD2.png index 140e874..67a5490 100644 --- a/test/images/simulated/ideal/C10_FD2.png +++ b/test/images/simulated/ideal/C10_FD2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94560ee019a6c3bc91e94cbde7da07802cb7776ccac76be08347726ee95c2de9 -size 1458368 +oid sha256:ca5a64c6d622ca0b2f5e1a43d72f89ec9421c22c6ef5f2ed530e97bd491acbb7 +size 1285930 diff --git a/test/images/simulated/ideal/C10_FD3.png b/test/images/simulated/ideal/C10_FD3.png index 8566e8b..ee68c13 100644 --- a/test/images/simulated/ideal/C10_FD3.png +++ b/test/images/simulated/ideal/C10_FD3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b23f3090b49f5ae9c18a5f568417968c481be52ba32aa78e020c3b0ede5658b -size 1391236 +oid sha256:c7c3ec5f10b5cc552303e7c5d5dc71d093839349452ff0c2d96bb4cd59180f6d +size 1181306 diff --git a/test/images/simulated/ideal/C10_FD4.png b/test/images/simulated/ideal/C10_FD4.png index 991a809..4853af7 100644 --- a/test/images/simulated/ideal/C10_FD4.png +++ b/test/images/simulated/ideal/C10_FD4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e7633d8a2370eda0ebec2d108259e3ac52507d498ff81a6e07b728e2ea00f78 -size 1452312 +oid sha256:34312f42da5f393364c3db9072f89dd5a1fcf64812b440ffa1eb01d98405244b +size 1287139 diff --git a/test/images/simulated/ideal/C10_FE1.png b/test/images/simulated/ideal/C10_FE1.png index 141561d..924e238 100644 --- a/test/images/simulated/ideal/C10_FE1.png +++ b/test/images/simulated/ideal/C10_FE1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a689f9ffb8040ae50e6eadbc9c31b2832e2b8a5c37af3df812ab6a58af95489 -size 1389347 +oid sha256:5364b626b26cbed4a00fd3e6c8e1e70eb701d42fd16be2464081b55a5056496a +size 1183342 diff --git a/test/images/simulated/ideal/C10_FE2.png b/test/images/simulated/ideal/C10_FE2.png index e5878f3..ada89b9 100644 --- 
a/test/images/simulated/ideal/C10_FE2.png +++ b/test/images/simulated/ideal/C10_FE2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b3dd03dc4e8a82d9627a3e0ab994895cf085ba08df60fe42eb5a64464820dbf -size 1409084 +oid sha256:10768669908715812dd798af55f42c44218f78cdd7174b1c84808ce33370c237 +size 1266312 diff --git a/test/images/simulated/ideal/C10_FE3.png b/test/images/simulated/ideal/C10_FE3.png index f6b70aa..535c2df 100644 --- a/test/images/simulated/ideal/C10_FE3.png +++ b/test/images/simulated/ideal/C10_FE3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bf89d858b05a6c7929f6031f4664b683554e77ae0ca0dc8121396e92e0fe96b -size 1417623 +oid sha256:0c6d984edc7ec98c4c8dd6be704a7963be15d69a9a0dddb5613efe08856709ff +size 1274775 diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index f190b38..201f720 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -1,9 +1,9 @@ from src.common import \ ExtrinsicCalibration, \ + ExtrinsicCalibrationDetectorResult, \ ImageResolution, \ ImageUtils, \ IntrinsicParameters, \ - IntrinsicCalibration, \ IntrinsicCalibrator, \ KeyValueSimpleAny, \ KeyValueSimpleString, \ @@ -13,8 +13,6 @@ ArucoOpenCVCommon from src.implementations.extrinsic_charuco_opencv import \ CharucoOpenCVExtrinsicCalibrator -from src.implementations.intrinsic_charuco_opencv import \ - CharucoOpenCVIntrinsicCalibrator import cv2 import datetime import numpy @@ -82,24 +80,14 @@ def test(self): message=message) # All cameras have the same imaging parameters. - # To simplify our lives and ensure a reasonable result, - # we'll calibrate all cameras with the same set of input images. - # We'll use all images from the A# and B# sets of frames. - intrinsic_parameters: IntrinsicParameters - with TemporaryDirectory() as temppath: - intrinsic_calibrator: CharucoOpenCVIntrinsicCalibrator = CharucoOpenCVIntrinsicCalibrator( - configuration=IntrinsicCalibrator.Configuration(data_path=temppath), - status_message_source=status_message_source) - for camera_id, image_filepaths_by_frame_id in image_filepaths_by_camera_frame.items(): - for frame_id, image_filepath in image_filepaths_by_frame_id.items(): - if not frame_id.startswith("A") and not frame_id.startswith("B"): - continue - image: numpy.ndarray = cv2.imread(image_filepath) - image_base64: str = ImageUtils.image_to_base64(image) - intrinsic_calibrator.add_image(image_base64) - intrinsics_calibration: IntrinsicCalibration - _, intrinsics_calibration = intrinsic_calibrator.calculate(image_resolution=IMAGE_RESOLUTION) - intrinsic_parameters = intrinsics_calibration.calibrated_values + # These were calculated by hand assuming lenses without any distortions + intrinsic_parameters: IntrinsicParameters = IntrinsicParameters( + focal_length_x_px=3582.76878, + focal_length_y_px=3640.38430, + optical_center_x_px=960.0, + optical_center_y_px=540.0, + radial_distortion_coefficients=[0, 0, 0], + tangential_distortion_coefficients=[0, 0]) intrinsics_by_camera: dict[str, IntrinsicParameters] = dict() # Access as x[camera_id] for camera_id in image_filepaths_by_camera_frame.keys(): @@ -121,6 +109,14 @@ def test(self): timestamp_utc_iso8601=timestamps_iso8601_by_frame[frame_id]) _, extrinsic_calibration = extrinsic_calibrator.calculate(detector_intrinsics_by_label=intrinsics_by_camera) - message = f"{len(extrinsic_calibration.calibrated_values)} calibrations." 
- print(message) + calibrated_value: ExtrinsicCalibrationDetectorResult + for calibrated_value in extrinsic_calibration.calibrated_values: + print( + f"Detector {calibrated_value.detector_label}:\n" + f" Translation: {calibrated_value.detector_to_reference.get_translation()}\n" + f" Rotation: {calibrated_value.detector_to_reference.get_rotation_as_quaternion(canonical=True)}") + +if __name__ == "__main__": + a = TestPoseSolver() + a.test() From d0d2c5e7a06ec4bc26a6b1c44b5c8c7ed889288b Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 13 Aug 2025 15:15:57 -0400 Subject: [PATCH 17/33] WIP: Some decoupling of status-tracking from calibrator classes --- src/common/__init__.py | 4 +- src/common/calibration.py | 366 +++++++++--------- src/detector/detector.py | 60 +-- .../intrinsic_charuco_opencv.py | 12 +- test/test_extrinsic_calibration.py | 15 +- 5 files changed, 236 insertions(+), 221 deletions(-) diff --git a/src/common/__init__.py b/src/common/__init__.py index f4fdb8d..3cb8897 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -15,13 +15,13 @@ TimeSyncStartRequest, \ TimeSyncStopRequest from .calibration import \ + CalibrationErrorReason, \ ExtrinsicCalibration, \ ExtrinsicCalibrationDetectorResult, \ ExtrinsicCalibrator, \ IntrinsicCalibration, \ IntrinsicCalibrator, \ - MCTExtrinsicCalibrationError, \ - MCTIntrinsicCalibrationError + MCTCalibrationError from .camera import \ Camera, \ MCTCameraRuntimeError diff --git a/src/common/calibration.py b/src/common/calibration.py index f4b22ac..a9c498d 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -8,9 +8,7 @@ from .serialization import \ IOUtils from .status import \ - MCTError, \ - SeverityLabel, \ - StatusMessageSource + MCTError import abc import datetime from enum import StrEnum @@ -25,20 +23,36 @@ logger = logging.getLogger(__name__) -class MCTIntrinsicCalibrationError(MCTError): - message: str +class CalibrationErrorReason(StrEnum): + INITIALIZATION: Final[str] = "initialization" + INVALID_INPUT: Final[str] = "invalid_input" + INVALID_STATE: Final[str] = "invalid_state" + DATA_NOT_FOUND: Final[str] = "data_not_found" + COMPUTATION_FAILURE: Final[str] = "computation_failure" - def __init__(self, message: str, *args): - super().__init__(args) - self.message = message + +_PUBLIC_MESSAGE_KEY: Final[str] = "public_message" +_PRIVATE_MESSAGE_KEY: Final[str] = "private_message" -class MCTExtrinsicCalibrationError(MCTError): - message: str +class MCTCalibrationError(MCTError): + public_message: str | None + private_message: str + reason: CalibrationErrorReason - def __init__(self, message: str, *args): + def __init__( + self, + reason: CalibrationErrorReason, + public_message: str | None = None, + private_message: str | None = None, + *args + ): super().__init__(args) - self.message = message + self.reason = reason + self.public_message = public_message + self.private_message = private_message + if self.private_message is None and self.public_message is not None: + self.private_message = self.private_message _RESULT_FORMAT: Final[str] = ".json" @@ -106,70 +120,54 @@ class AbstractCalibrator(abc.ABC): _data_ledger: _DataLedger _data_ledger_filepath: str - _status_message_source: StatusMessageSource - def __init__( self, - status_message_source: StatusMessageSource, - data_path: str + data_path: str, + **kwargs ): - self._status_message_source = status_message_source - self._data_path = data_path if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): - 
self._status_message_source.enqueue_status_message( - severity=SeverityLabel.CRITICAL, - message="Data path does not exist and could not be created.") - detailed_message: str = f"{self._data_path} does not exist and could not be created." - logger.critical(detailed_message) - raise RuntimeError(detailed_message) + raise MCTCalibrationError( + reason=CalibrationErrorReason.INITIALIZATION, + public_message="Data path does not exist and could not be created.", + private_message=f"{self._data_path} does not exist and could not be created.") self._data_ledger_filepath = os.path.join(self._data_path, AbstractCalibrator._DATA_LEDGER_FILENAME) - if not self._load_data_ledger(): - message: str = "The data ledger could not be loaded or created. "\ - "In order to avoid data loss, the software will now abort. " \ - "Please manually correct or remove the file in the filesystem." - logger.critical(message) - self._status_message_source.enqueue_status_message(severity=SeverityLabel.CRITICAL, message=message) - raise RuntimeError(message) + self._load_data_ledger() def _add_image( self, image: numpy.ndarray, metadata: _ImageMetadata, - ) -> bool: + ) -> None: """ Helper for saving images consistently across different types of calibrators Returns true if successful, False otherwise. """ # Before making any changes to the data ledger, make sure folders exist if not self._exists_on_filesystem(path=os.path.dirname(metadata.filepath), pathtype="path", create_path=True): - message = "Failed to create storage location for input image." - logger.error(message) - self._status_message_source.enqueue_status_message(severity=SeverityLabel.ERROR, message=message) - return False + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Failed to create storage location for input image.") # Also make sure that this file does not somehow already exist (highly unlikely) if os.path.exists(metadata.filepath): - logger.error(f"Image {metadata.filepath} appears to already exist. This is never expected to occur.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Image appears to already exist. This is never expected to occur. " - "Please try again, and if this error continues to occur then please report a bug.") - return False + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Image appears to already exist. This is never expected to occur. " + "Please try again, and if this error continues to occur then please report a bug.", + private_message=f"Image {metadata.filepath} appears to already exist. 
This is never expected to occur.") image_bytes: bytes image_bytes = ImageUtils.image_to_bytes(image_data=image, image_format=ImageFormat.FORMAT_PNG) try: with (open(metadata.filepath, 'wb') as in_file): in_file.write(image_bytes) except IOError as e: - logger.error(f"Failed to save image to {metadata.filepath}, reason: {str(e)}.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to save image - see calibration log for more details.") - return False + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Failed to save image - see local log for more details.", + private_message=f"Failed to save image to {metadata.filepath}, reason: {str(e)}.") self._data_ledger.image_metadata_list.append(metadata) self._save_data_ledger() - return True def _add_result( self, @@ -183,22 +181,20 @@ def _add_result( self._data_ledger.result_metadata_list.append(metadata) self._save_data_ledger() - def _delete_file_if_exists(self, filepath: str): + @staticmethod + def _delete_file_if_exists(filepath: str): try: os.remove(filepath) - except FileNotFoundError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Failed to remove a file from the calibrator because it does not exist. " - f"See its internal log for details.") + except FileNotFoundError: + pass except OSError as e: - logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Failed to remove a file from the calibrator due to an unexpected reason. " - f"See its internal log for details.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Failed to remove a file from the calibrator due to an unexpected reason. 
" + f"See local log for details.", + private_message=f"Failed to delete file {filepath} for reason: {str(e)}.") + # noinspection DuplicatedCode def delete_staged(self) -> None: image_indices_to_delete: list = list() image_metadata: _ImageMetadata @@ -218,21 +214,28 @@ def delete_staged(self) -> None: del self._data_ledger.result_metadata_list[i] self._save_data_ledger() + @staticmethod def _exists_on_filesystem( - self, path: str, pathtype: IOUtils.PathType, create_path: bool = False ) -> bool: - return IOUtils.exists( + errors: dict[str, str] = dict() + return_value: bool = IOUtils.exists( path=path, pathtype=pathtype, create_path=create_path, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error) - + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg)) + if len(errors) > 0: + logger.error(errors[_PRIVATE_MESSAGE_KEY]) + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error determining if a file exists on the file system; See local log for details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + return return_value + + # noinspection DuplicatedCode def _get_result_metadata_by_identifier( self, identifier: str @@ -245,11 +248,13 @@ def _get_result_metadata_by_identifier( matching_result_metadata = result_metadata break if match_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {identifier} is not associated with any result.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"Identifier {identifier} is not associated with any result.") elif match_count > 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {identifier} is associated with multiple results.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Identifier {identifier} is associated with multiple results.") return matching_result_metadata def list_image_metadata(self) -> list[_ImageMetadata]: @@ -258,58 +263,50 @@ def list_image_metadata(self) -> list[_ImageMetadata]: def list_result_metadata(self) -> list[_ResultMetadata]: return list(self._data_ledger.result_metadata_list) - def _load_data_ledger(self) -> bool: + def _load_data_ledger(self) -> None: """ :return: True if loaded or if it can be created without overwriting existing data. False otherwise. """ json_dict: dict - load_success: bool - json_dict, load_success = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) - if not load_success: - return False + json_dict = self._load_dict_from_filepath(filepath=self._data_ledger_filepath) try: self._data_ledger = _DataLedger(**json_dict) except ValidationError as e: logger.error(e) - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to parse data ledger from file.") - return False - return True + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error loading the data ledger; See local log for details.", + private_message=str(e)) + @staticmethod def _load_dict_from_filepath( - self, filepath: str - ) -> tuple[dict, bool]: + ) -> dict: """ - :return: - dict containing existing data (or empty if an unexpected error occurred) - bool indicating whether if loaded or if it can be created without overwriting existing data. False otherwise. 
+ :return: dict containing existing data (or empty if no data exists) """ if not os.path.exists(filepath): - return dict(), True + return dict() # Not considered an error, just doesn't exist yet elif not os.path.isfile(filepath): - logger.critical(f"Json file location {filepath} exists but is not a file.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.CRITICAL, - message="Filepath location for json exists but is not a file. " + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="A file failed to load. See local log for details.", + private_message=f"JSON at {filepath} exists but is not a file. " "Most likely a directory exists at that location, " "and it needs to be manually removed.") - return dict(), False + errors: dict[str, str] = dict() json_dict: dict = IOUtils.hjson_read( filepath=filepath, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error) + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg)) if not json_dict: - logger.error(f"Failed to load json from file {filepath}.") - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message="Failed to load json from file.") - return dict(), False - return json_dict, True + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Error loading data; See local log for details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + return json_dict + # noinspection DuplicatedCode def load_image( self, identifier: str @@ -322,20 +319,26 @@ def load_image( matching_metadata = image_metadata break if match_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {identifier} is not associated with any image.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {identifier} is not associated with any image.") elif match_count > 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {identifier} is associated with multiple images.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {identifier} is associated with multiple images.") if not os.path.exists(matching_metadata.filepath): - raise MCTIntrinsicCalibrationError(message=f"File does not exist for image {identifier}.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"File does not exist for image {identifier}.") image_bytes: bytes try: with (open(matching_metadata.filepath, 'rb') as in_file): image_bytes = in_file.read() except OSError: - raise MCTIntrinsicCalibrationError(message=f"Failed to open image {identifier}.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Failed to open image {identifier}.") image_base64 = ImageUtils.bytes_to_base64(image_bytes=image_bytes) return image_base64 @@ -357,9 +360,7 @@ def _load_result_by_metadata( """ json_dict: dict load_success: bool - json_dict, load_success = self._load_dict_from_filepath(metadata.filepath) - if not load_success: - raise MCTIntrinsicCalibrationError(message=f"Failed to load result {metadata.identifier}.") + json_dict = self._load_dict_from_filepath(metadata.filepath) result: result_type = result_type(**json_dict) return result @@ -368,8 +369,8 @@ def 
_save_data_ledger(self) -> None: filepath=self._data_ledger_filepath, json_dict=self._data_ledger.model_dump()) + @staticmethod def _save_dict_to_filepath( - self, filepath: str, json_dict: dict, ignore_none: bool = False @@ -379,15 +380,20 @@ def _save_dict_to_filepath( :param json_dict: What to write to the file :param ignore_none: See IOUtils.json_write """ + errors: dict[str, str] = dict() IOUtils.json_write( filepath=filepath, json_dict=json_dict, - on_error_for_user=lambda msg: self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=msg), - on_error_for_dev=logger.error, + on_error_for_user=lambda msg: errors.__setitem__(_PUBLIC_MESSAGE_KEY, msg), + on_error_for_dev=lambda msg: errors.__setitem__(_PRIVATE_MESSAGE_KEY, msg), ignore_none=ignore_none) + if len(errors) > 0: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Error saving data; See local log for more details.", + private_message=errors[_PRIVATE_MESSAGE_KEY]) + # noinspection DuplicatedCode def update_image_metadata( self, image_identifier: str, @@ -395,24 +401,27 @@ def update_image_metadata( image_label: str | None ) -> None: match_count: int = 0 - for image in self._data_ledger.image_metadata_list: - if image.identifier == image_identifier: - image.state = image_state - if image_label is not None: - image.image_label = image_label + matched_metadata: _ImageMetadata | None = None + for metadata in self._data_ledger.image_metadata_list: + if metadata.identifier == image_identifier: match_count += 1 - break + matched_metadata = metadata if match_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {image_identifier} is not associated with any image.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {image_identifier} is not associated with any image.") elif match_count > 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Identifier {image_identifier} is associated with multiple images. " - "This suggests that the data ledger is in an inconsistent state. " - "It may be prudent to either manually correct it, or recreate it.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {image_identifier} is associated with multiple images. " + "This suggests that the data ledger is in an inconsistent state. 
" + "It may be prudent to either manually correct it, or recreate it.") + matched_metadata.state = image_state + if image_label is not None: + matched_metadata.image_label = image_label self._save_data_ledger() + # noinspection DuplicatedCode def update_result_metadata( self, identifier: str, @@ -420,24 +429,24 @@ def update_result_metadata( result_label: str | None = None ) -> None: match_count: int = 0 - for result in self._data_ledger.result_metadata_list: - if result.identifier == identifier: - result.state = state - if result_label is not None: - result.result_label = result_label + matched_metadata: _ResultMetadata | None = None + for metadata in self._data_ledger.result_metadata_list: + if metadata.identifier == identifier: match_count += 1 - break - + matched_metadata = metadata if match_count < 1: - raise MCTIntrinsicCalibrationError( - message=f"Identifier {identifier} is not associated with any result.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + private_message=f"Identifier {identifier} is not associated with any result.") elif match_count > 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Identifier {identifier} is associated with multiple results. " - "This suggests that the data ledger is in an inconsistent state. " - "It may be prudent to either manually correct it, or recreate it.") - + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + private_message=f"Identifier {identifier} is associated with multiple results. " + "This suggests that the data ledger is in an inconsistent state. " + "Please manually correct it, or recreate it.") + matched_metadata.state = state + if result_label is not None: + matched_metadata.result_label = result_label self._save_data_ledger() @@ -463,12 +472,11 @@ class IntrinsicCalibrator(AbstractCalibrator, abc.ABC): def __init__( self, configuration: Configuration, - status_message_source: StatusMessageSource ): super().__init__( - status_message_source=status_message_source, data_path=configuration.data_path) + # noinspection DuplicatedCode def add_image( self, image_base64: str, @@ -507,16 +515,20 @@ def calculate( if image_metadata.state != _ImageState.SELECT: continue if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Image {image_metadata.identifier} was not found. " - "This suggests that the data ledger is in an inconsistent state. " - "It will be omitted from the calibration.") - continue + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="An image failed to load. " + "suggesting that the data ledger is in an inconsistent state. " + "Please see the locaL log for details.", + private_message=f"Image {image_metadata.identifier} was not found. " + "This suggests that the data ledger is in an inconsistent state. 
" + "Please correct the data ledger.") image_metadata_list.append(image_metadata) if len(image_metadata_list) == 0: - raise MCTIntrinsicCalibrationError(message=f"No images found for resolution {str(image_resolution)}.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message=f"No images found for resolution {str(image_resolution)}.") intrinsic_calibration, image_metadata_list = self._calculate_implementation( image_resolution=image_resolution, @@ -568,17 +580,16 @@ def get_result_active_by_image_resolution( matched_metadata = result_metadata if match_count < 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"No result metadata is active for resolution {str(image_resolution)}." - "Returning latest result.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"No result metadata is active for resolution {str(image_resolution)}. " + "Please ensure one has been selected, then try again.") if match_count > 1: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Multiple result metadata are active for resolution {str(image_resolution)}. " - "Returning latest active result. " - "To recover from this ambiguous state, it is strong recommended to explicitly set " - "one of the results as \"active\", which will reset others to \"retain\".") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Multiple result metadata are active for resolution {str(image_resolution)}. " + "To recover from this ambiguous state, explicitly set " + "one of the results as \"active\", which will reset others to \"retain\".") if matched_metadata is None: return None @@ -659,12 +670,13 @@ class ExtrinsicCalibrator(AbstractCalibrator, abc.ABC): def __init__( self, configuration: Configuration, - status_message_source: StatusMessageSource + **kwargs ): super().__init__( - status_message_source=status_message_source, - data_path=configuration.data_path) + data_path=configuration.data_path, + **kwargs) + # noinspection DuplicatedCode def add_image( self, image_base64: str, @@ -705,12 +717,14 @@ def calculate( if image_metadata.state != _ImageState.SELECT: continue if not self._exists_on_filesystem(path=image_metadata.filepath, pathtype="filepath"): - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.ERROR, - message=f"Image {image_metadata.identifier} was not found. " - "This suggests that the data ledger is in an inconsistent state. " - "It will be omitted from the calibration.") - continue + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="An image failed to load. " + "suggesting that the data ledger is in an inconsistent state. " + "Please see the locaL log for details.", + private_message=f"Image {image_metadata.identifier} was not found. " + "This suggests that the data ledger is in an inconsistent state. 
" + "Please correct the data ledger.") image_metadata_list.append(image_metadata) # This is a check to make sure that there are no duplicates over any (timestamp, detector_label) @@ -718,10 +732,14 @@ def calculate( (metadata.timestamp_utc_iso8601, metadata.detector_label) for metadata in image_metadata_list] if len(identifiers) != len(set(identifiers)): - raise MCTExtrinsicCalibrationError(message="Duplicates were detected over (timestamp, detector_label).") + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message="Duplicate data were detected over (timestamp, detector_label).") if len(image_metadata_list) == 0: - raise MCTIntrinsicCalibrationError(message=f"No images found for calibration.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message=f"No images found for calibration.") extrinsic_calibration, image_metadata_list = self._calculate_implementation( detector_intrinsics_by_label=detector_intrinsics_by_label, diff --git a/src/detector/detector.py b/src/detector/detector.py index 4199f12..9666d1e 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -44,7 +44,7 @@ IntrinsicCalibration, \ IntrinsicCalibrator, \ KeyValueMetaAbstract, \ - MCTIntrinsicCalibrationError, \ + MCTCalibrationError, \ MCTCameraRuntimeError, \ MCTComponent, \ MCTAnnotatorRuntimeError, \ @@ -93,8 +93,7 @@ def __init__( self._detector_configuration = detector_configuration self._calibrator = IntrinsicCalibrator( - configuration=detector_configuration.calibrator_configuration, - status_message_source=self.get_status_message_source()) + configuration=detector_configuration.calibrator_configuration) self._camera = camera_type( configuration=detector_configuration.camera_configuration, status_message_source=self.get_status_message_source()) @@ -116,8 +115,9 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro try: result_identifier, intrinsic_calibration = self._calibrator.calculate( image_resolution=request.image_resolution) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationCalculateResponse( result_identifier=result_identifier, intrinsic_calibration=intrinsic_calibration) @@ -125,8 +125,9 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: try: self._calibrator.delete_staged() - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return EmptyResponse() def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | ErrorResponse: @@ -135,8 +136,9 @@ def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | Erro image_format=ImageFormat.FORMAT_PNG, requested_resolution=None) image_identifier: str = self._calibrator.add_image(image_base64=image_base64) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationImageAddResponse(image_identifier=image_identifier) def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | ErrorResponse: @@ -147,8 +149,9 @@ def 
calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | Error image_base64: str try: image_base64 = self._calibrator.load_image(identifier=request.image_identifier) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationImageGetResponse(image_base64=image_base64) def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataListResponse | ErrorResponse: @@ -160,8 +163,9 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL try: image_metadata_list = self._calibrator.list_image_metadata_by_image_resolution( image_resolution=request.image_resolution) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: @@ -174,16 +178,18 @@ def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorRe image_identifier=request.image_identifier, image_state=request.image_state, image_label=request.image_label) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return EmptyResponse() def calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListResponse | ErrorResponse: resolutions: list[ImageResolution] try: resolutions = self._calibrator.list_resolutions() - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationResolutionListResponse(resolutions=resolutions) def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | ErrorResponse: @@ -194,8 +200,9 @@ def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | Err intrinsic_calibration: IntrinsicCalibration try: intrinsic_calibration = self._calibrator.get_result(result_identifier=request.result_identifier) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActiveResponse | ErrorResponse: @@ -203,8 +210,9 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive try: image_resolution: ImageResolution = self._camera.get_resolution() intrinsic_calibration = self._calibrator.get_result_active_by_image_resolution(image_resolution=image_resolution) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadataListResponse | ErrorResponse: @@ -216,8 +224,9 @@ def calibration_result_metadata_list(self, **kwargs) -> 
CalibrationResultMetadat try: result_metadata_list = self._calibrator.list_result_metadata_by_image_resolution( image_resolution=request.image_resolution) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: @@ -230,8 +239,9 @@ def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorR identifier=request.result_identifier, state=request.result_state, result_label=request.result_label) - except MCTIntrinsicCalibrationError as e: - return ErrorResponse(message=e.message) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) return EmptyResponse() def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py index 78cabac..07488a4 100644 --- a/src/implementations/intrinsic_charuco_opencv.py +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -1,11 +1,11 @@ from .common_aruco_opencv import ArucoOpenCVCommon from src.common import \ + CalibrationErrorReason, \ ImageResolution, \ IntrinsicCalibration, \ IntrinsicCalibrator, \ IntrinsicParameters, \ - MCTIntrinsicCalibrationError, \ - SeverityLabel + MCTCalibrationError import cv2 import cv2.aruco import datetime @@ -41,10 +41,6 @@ def _calculate_implementation( dictionary=charuco_spec.aruco_dictionary(), parameters=aruco_detector_parameters) if len(marker_corners) <= 0: - self._status_message_source.enqueue_status_message( - severity=SeverityLabel.WARNING, - message=f"Image {metadata.identifier} did not appear to contain any identifiable markers. 
" - f"It will be omitted from the calibration.") continue used_image_metadata.append(metadata) # Note: @@ -63,7 +59,9 @@ def _calculate_implementation( all_charuco_ids.append(frame_charuco_ids) if len(all_charuco_corners) <= 0: - raise MCTIntrinsicCalibrationError(message="The input images did not contain visible markers.") + raise MCTCalibrationError( + reason=CalibrationErrorReason.COMPUTATION_FAILURE, + public_message="The input images did not contain visible markers.") # outputs to be stored in these containers calibration_result = cv2.aruco.calibrateCameraCharucoExtended( diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 201f720..deaf893 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -6,9 +6,7 @@ IntrinsicParameters, \ IntrinsicCalibrator, \ KeyValueSimpleAny, \ - KeyValueSimpleString, \ - SeverityLabel, \ - StatusMessageSource + KeyValueSimpleString from src.implementations.common_aruco_opencv import \ ArucoOpenCVCommon from src.implementations.extrinsic_charuco_opencv import \ @@ -35,10 +33,6 @@ class TestPoseSolver(unittest.TestCase): def test(self): - status_message_source: StatusMessageSource = StatusMessageSource( - source_label="test", - send_to_logger=True) # Python built-in logger - # Organize ourselves with respect to the input data image_location: str = os.path.join("images", "simulated", "ideal") image_contents: list[str] = os.listdir(image_location) @@ -74,10 +68,6 @@ def test(self): + datetime.timedelta(seconds=image_count)).isoformat() image_filepaths_by_frame_camera[frame_id][camera_id] = image_filepath image_count += 1 - message = f"Found {image_count} image files." - status_message_source.enqueue_status_message( - severity=SeverityLabel.INFO, - message=message) # All cameras have the same imaging parameters. 
# These were calculated by hand assuming lenses without any distortions @@ -97,8 +87,7 @@ def test(self): extrinsic_calibration: ExtrinsicCalibration with TemporaryDirectory() as temppath: extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator = CharucoOpenCVExtrinsicCalibrator( - configuration=IntrinsicCalibrator.Configuration(data_path=temppath), - status_message_source=status_message_source) + configuration=IntrinsicCalibrator.Configuration(data_path=temppath)) for frame_id, image_filepaths_by_camera_id in image_filepaths_by_frame_camera.items(): for camera_id, image_filepath in image_filepaths_by_camera_id.items(): image: numpy.ndarray = cv2.imread(image_filepath) From a87032d1c0b190ee132738a9ac9845450de1e881 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 13 Aug 2025 15:31:22 -0400 Subject: [PATCH 18/33] WIP: Replace constants in extrinsic calibration with configurable parameters --- src/common/calibration.py | 21 +++++----- .../extrinsic_charuco_opencv.py | 40 +++++++++---------- test/test_extrinsic_calibration.py | 2 +- 3 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/common/calibration.py b/src/common/calibration.py index a9c498d..5ffbd6a 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -122,10 +122,9 @@ class AbstractCalibrator(abc.ABC): def __init__( self, - data_path: str, - **kwargs + configuration: _Configuration ): - self._data_path = data_path + self._data_path = configuration.data_path if not self._exists_on_filesystem(path=self._data_path, pathtype="path", create_path=True): raise MCTCalibrationError( reason=CalibrationErrorReason.INITIALIZATION, @@ -471,10 +470,11 @@ class IntrinsicCalibrator(AbstractCalibrator, abc.ABC): def __init__( self, - configuration: Configuration, + configuration: Configuration | dict[str, ...], ): - super().__init__( - data_path=configuration.data_path) + if isinstance(configuration, dict): + configuration = IntrinsicCalibrator.Configuration(**configuration) + super().__init__(configuration=configuration) # noinspection DuplicatedCode def add_image( @@ -669,12 +669,11 @@ class ExtrinsicCalibrator(AbstractCalibrator, abc.ABC): def __init__( self, - configuration: Configuration, - **kwargs + configuration: Configuration | dict ): - super().__init__( - data_path=configuration.data_path, - **kwargs) + if isinstance(configuration, dict): + configuration = ExtrinsicCalibrator.Configuration(**configuration) + super().__init__(configuration=configuration) # noinspection DuplicatedCode def add_image( diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 146e3ba..8eee2f4 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -16,17 +16,6 @@ import numpy from pydantic import BaseModel, Field from scipy.spatial.transform import Rotation -import sys -from typing import Final - - -_EPSILON: Final[float] = 0.0001 -_MAX_FLOAT: Final[float] = sys.float_info.max -_TERMINATION_ITERATION_COUNT: Final[int] = 500 -_TERMINATION_ROTATION_CHANGE_DEGREES: Final[float] = 0.05 -_TERMINATION_TRANSLATION_CHANGE: Final[float] = 0.5 - -_DEBUG_ANNOTATIONS: Final[bool] = False class _ImageData(BaseModel): @@ -122,8 +111,24 @@ def get_timestamp_container( raise IndexError() +class _Configuration(ExtrinsicCalibrator.Configuration): + termination_iteration_count: int = Field(default=500) + termination_rotation_change_degrees: int = Field(default=0.05) + termination_translation_change: int = 
Field(default=0.5) + ray_intersection_maximum_distance: float = Field(default=50.0) + + class CharucoOpenCVExtrinsicCalibrator(ExtrinsicCalibrator): + Configuration: type[ExtrinsicCalibrator.Configuration] = _Configuration + configuration: _Configuration + + def __init__(self, configuration: Configuration | dict): + if isinstance(configuration, dict): + configuration = _Configuration(**configuration) + self.configuration = configuration + super().__init__(configuration) + @staticmethod def _annotate_image( aruco_detector_parameters: cv2.aruco.DetectorParameters, @@ -137,11 +142,6 @@ def _annotate_image( aruco_detector_parameters=aruco_detector_parameters, aruco_dictionary=aruco_dictionary, image_greyscale=image_greyscale) - if _DEBUG_ANNOTATIONS: - for annotation in annotations: - cv2.drawMarker(img=image_rgb, position=(int(annotation.x_px), int(annotation.y_px)), color=(0, 255, 0)) - cv2.imshow("Test", image_rgb) - cv2.waitKey(0) return annotations def _calculate_implementation( @@ -206,7 +206,7 @@ def _calculate_implementation( detector.initial_to_reference = initial_to_reference detector.refined_to_reference = initial_to_reference - for i in range(0, _TERMINATION_ITERATION_COUNT): + for i in range(0, self.configuration.termination_iteration_count): # Update each ray based on the current pose for timestamp_data in data.timestamps: for image_data in timestamp_data.images: @@ -237,7 +237,7 @@ def _calculate_implementation( ray_list.append(ray) ray_intersection: MathUtils.RayIntersectionNOutput = MathUtils.closest_intersection_between_n_lines( rays=ray_list, - maximum_distance=_MAX_FLOAT) + maximum_distance=self.configuration.ray_intersection_maximum_distance) if ray_intersection.intersection_count() > 0: position: numpy.ndarray = ray_intersection.centroid() feature_data.position = Landmark( @@ -286,8 +286,8 @@ def _calculate_implementation( rotation_change_degrees: float = \ numpy.linalg.norm(Rotation.from_matrix(old_to_refined[0:3, 0:3]).as_rotvec(degrees=True)) detector_data.refined_to_reference = refined_to_reference - if rotation_change_degrees > _TERMINATION_ROTATION_CHANGE_DEGREES or \ - translation_change > _TERMINATION_TRANSLATION_CHANGE: + if rotation_change_degrees > self.configuration.termination_rotation_change_degrees or \ + translation_change > self.configuration.termination_translation_change: converged = False if converged: break diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index deaf893..645d681 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -87,7 +87,7 @@ def test(self): extrinsic_calibration: ExtrinsicCalibration with TemporaryDirectory() as temppath: extrinsic_calibrator: CharucoOpenCVExtrinsicCalibrator = CharucoOpenCVExtrinsicCalibrator( - configuration=IntrinsicCalibrator.Configuration(data_path=temppath)) + configuration=CharucoOpenCVExtrinsicCalibrator.Configuration(data_path=temppath)) for frame_id, image_filepaths_by_camera_id in image_filepaths_by_frame_camera.items(): for camera_id, image_filepath in image_filepaths_by_camera_id.items(): image: numpy.ndarray = cv2.imread(image_filepath) From 77228803c7d7cf4c07aa8ba5cb30b0a37b47e4cd Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 14 Aug 2025 17:07:38 -0400 Subject: [PATCH 19/33] WIP: Add testing against theoretical values for extrinsic calibrator --- src/common/math.py | 15 ++++++ test/test_extrinsic_calibration.py | 85 ++++++++++++++++++++++++++++-- 2 files changed, 95 insertions(+), 5 deletions(-) diff --git 
a/src/common/math.py b/src/common/math.py index 8a9cddd..8f59cdf 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -166,6 +166,21 @@ def inverse(self) -> 'Matrix4x4': inv_numpy_array = numpy.linalg.inv(self.as_numpy_array()) return Matrix4x4.from_numpy_array(inv_numpy_array) + # Note that these methods are not yet tested, and are disabled for now + # def set_rotation_from_quaternion(self, quaternion: list[float]) -> None: + # if len(quaternion) < 4: + # raise ValueError() + # # noinspection PyArgumentList + # rotation_matrix: numpy.ndarray = Rotation.from_quat(quaternion).as_matrix() + # self.values[0:3] = rotation_matrix[0, 0:3] + # self.values[4:7] = rotation_matrix[1, 0:3] + # self.values[8:11] = rotation_matrix[2, 0:3] + # def set_translation(self, translation: list[float]) -> None: + # if len(translation) < 3: + # raise ValueError() + # a = self.values + # a[3], a[7], a[11] = translation[0], translation[1], translation[2] + @staticmethod def from_raw_values( v00, v01, v02, v03, diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index 645d681..db51f60 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -4,7 +4,6 @@ ImageResolution, \ ImageUtils, \ IntrinsicParameters, \ - IntrinsicCalibrator, \ KeyValueSimpleAny, \ KeyValueSimpleString from src.implementations.common_aruco_opencv import \ @@ -16,6 +15,7 @@ import numpy import os import re +from scipy.spatial.transform import Rotation from tempfile import TemporaryDirectory from typing import Final import unittest @@ -29,6 +29,9 @@ KeyValueSimpleString( key=ArucoOpenCVCommon.KEY_CORNER_REFINEMENT_METHOD, value=ArucoOpenCVCommon.CORNER_REFINEMENT_METHOD_SUBPIX)] +THRESHOLD_TRANSLATION_IN_PLANE_MM: Final[float] = 10 +THRESHOLD_TRANSLATION_OUT_OF_PLANE_MM: Final[float] = 25 +THRESHOLD_ROTATION_DEG: Final[float] = 1 class TestPoseSolver(unittest.TestCase): @@ -98,12 +101,84 @@ def test(self): timestamp_utc_iso8601=timestamps_iso8601_by_frame[frame_id]) _, extrinsic_calibration = extrinsic_calibrator.calculate(detector_intrinsics_by_label=intrinsics_by_camera) + # label, translation, rotation (as quaternion) + ground_truth_detector_poses: dict[str, tuple[list[float], list[float]]] = { + "01": ([-866.025, 0., 500.], [ 0.353553, -0.353553, -0.612372, 0.612372]), + "02": ([-612.372, -612.372, 500.], [ 0.46194, -0.191342, -0.331414, 0.800103]), + "03": ([0., -866.025, 500.], [ 0.5, 0., 0., 0.866025]), + "04": ([612.372, -612.372, 500.], [ 0.46194, 0.191342, 0.331414, 0.800103]), + "05": ([866.025, 0., 500.], [ 0.353553, 0.353553, 0.612372, 0.612372]), + "06": ([-707.107, 0., 707.107], [ 0.270598, -0.270598, -0.653281, 0.653282]), + "07": ([-500., -500., 707.107], [ 0.353553, -0.146447, -0.353553, 0.853553]), + "08": ([0., -707.107, 707.107], [ 0.382683, 0., 0., 0.92388]), + "09": ([500., -500., 707.107], [ 0.353553, 0.146447, 0.353553, 0.853553]), + "10": ([707.107, 0., 707.107], [ 0.270598, 0.270598, 0.653282, 0.653281])} + calibrated_value: ExtrinsicCalibrationDetectorResult for calibrated_value in extrinsic_calibration.calibrated_values: - print( - f"Detector {calibrated_value.detector_label}:\n" - f" Translation: {calibrated_value.detector_to_reference.get_translation()}\n" - f" Rotation: {calibrated_value.detector_to_reference.get_rotation_as_quaternion(canonical=True)}") + expected_translation: list[float] + expected_rotation_quaternion: list[float] + expected_translation, expected_rotation_quaternion = \ + 
ground_truth_detector_poses[calibrated_value.detector_label] + obtained_translation: list[float] = calibrated_value.detector_to_reference.get_translation() + obtained_rotation_quaternion: list[float] = \ + calibrated_value.detector_to_reference.get_rotation_as_quaternion() + + translation_difference_vector: numpy.ndarray = \ + numpy.asarray(expected_translation) - numpy.asarray(obtained_translation) + # noinspection PyArgumentList + image_plane_normal: numpy.ndarray = numpy.matmul( + Rotation.from_quat(expected_rotation_quaternion).as_matrix(), + numpy.asarray([0, 0, 1])) + translation_difference_in_plane_mm: float = numpy.dot( + translation_difference_vector, + image_plane_normal) + self.assertLess(translation_difference_in_plane_mm, THRESHOLD_TRANSLATION_IN_PLANE_MM) + + # Pythagorean theorem to get the translation component out of plane + translation_difference_out_of_plane_mm: float = numpy.sqrt( + translation_difference_in_plane_mm ** 2 + numpy.linalg.norm(translation_difference_vector) ** 2) + self.assertLess(translation_difference_out_of_plane_mm, THRESHOLD_TRANSLATION_OUT_OF_PLANE_MM) + + # noinspection PyArgumentList + rotation_difference_deg: float = numpy.linalg.norm( + Rotation.from_matrix(numpy.matmul( + Rotation.from_quat(expected_rotation_quaternion).as_matrix(), + numpy.linalg.inv(Rotation.from_quat(obtained_rotation_quaternion).as_matrix()) + )).as_rotvec(degrees=True)) + self.assertLess(rotation_difference_deg, THRESHOLD_ROTATION_DEG) + + # print( + # f"{calibrated_value.detector_label}: \n" + # f" Expected translation: {expected_translation}\n" + # f" Obtained translation: {obtained_translation}\n" + # f" Expected rotation: {expected_rotation_quaternion}\n" + # f" Obtained rotation: {obtained_rotation_quaternion}\n" + # f" Translation Diff IP: {translation_difference_in_plane_mm}\n" + # f" Translation Diff OOP: {translation_difference_out_of_plane_mm}\n" + # f" Rotation Diff Deg: {rotation_difference_deg}") + + # These are from the Blender file, copied by hand. + # They are not currently used in the test, but they are here for possible later use. + # Note that Blender represents quaternions in WXYZ, and here it is XYZW. + # The numbers have not been verified, and as such it is possible that there may be some errors. 
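+        # Since Blender reports quaternions as (w, x, y, z) while this test (and scipy's
+        # Rotation.from_quat) uses the scalar-last (x, y, z, w) order, values copied from
+        # Blender would first need to be reordered. A hypothetical helper, sketched here
+        # only for illustration alongside the unverified ground-truth data below:
+        # def _blender_wxyz_to_xyzw(quaternion_wxyz: list[float]) -> list[float]:
+        #     w, x, y, z = quaternion_wxyz
+        #     return [x, y, z, w]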
+ _ground_truth_frame_poses: dict[str, tuple[list[float], list[float]]] = { + "A0": ([0., 0., 10.], [ 0., 0., 0., 1.]), + "B1": ([125., 100., 30.], [ 0.065263, -0.113039, 0.495722, 0.858616]), + "B2": ([-125., 100., 30.], [ 0.113039, -0.065263, 0.858616, 0.495722]), + "B3": ([125., -100., 30.], [ 0.065263, 0.113039, 0.495722, -0.858616]), + "B4": ([-125., -100., 30.], [ 0.113039, 0.065263, 0.858616, -0.495722]), + "C1": ([250., 250., 30.], [ 0.065263, -0.113039, 0.495722, 0.858616]), + "C2": ([-250., 250., 30.], [ 0.113039, -0.065263, 0.858616, 0.495722]), + "C3": ([250., -250., 30.], [ 0.065263, 0.113039, 0.495722, -0.858616]), + "C4": ([-250., -250., 30.], [ 0.113039, 0.065263, 0.858616, -0.495722]), + "D1": ([-25., -25., 80.], [ 0.176704, -0.4266, 0.339444, 0.819491]), + "D2": ([25., -25., 80.], [ 0.4266, -0.176703, 0.819491, 0.339444]), + "D3": ([-25., 25., 80.], [ -0.176704, -0.4266, -0.339444, 0.819491]), + "D4": ([25., 25., 80.], [ 0.4266, 0.176704, 0.819491, -0.339445]), + "E1": ([-175., 5., 105.], [ 0.06027, -0.457798, 0.115778, 0.879422]), + "E2": ([0., -175., 105.], [ 0.326506, -0.326506, 0.627211, 0.627211]), + "E3": ([175., -5., 105.], [ 0.457798, -0.06027, 0.879422, 0.115778])} if __name__ == "__main__": From e2236470b33df302ef4899633624f7a7bc6b35f2 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 14 Aug 2025 17:23:01 -0400 Subject: [PATCH 20/33] WIP: Centralize aruco detection parameter creation so it's easier to have a "standard" configuration --- src/implementations/annotator_aruco_opencv.py | 4 ++-- src/implementations/common_aruco_opencv.py | 14 ++++++++++++++ src/implementations/extrinsic_charuco_opencv.py | 5 +++-- src/implementations/intrinsic_charuco_opencv.py | 3 ++- 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/implementations/annotator_aruco_opencv.py b/src/implementations/annotator_aruco_opencv.py index 672dc95..fb3458d 100644 --- a/src/implementations/annotator_aruco_opencv.py +++ b/src/implementations/annotator_aruco_opencv.py @@ -34,8 +34,8 @@ def __init__( configuration=configuration, status_message_source=status_message_source) - self._aruco_dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) - self._aruco_parameters = cv2.aruco.DetectorParameters() + self._aruco_dictionary = ArucoOpenCVCommon.standard_aruco_dictionary() + self._aruco_parameters = ArucoOpenCVCommon.standard_aruco_detection_parameters() self._snapshots_identified = list() self._snapshots_unidentified = list() self._update_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) diff --git a/src/implementations/common_aruco_opencv.py b/src/implementations/common_aruco_opencv.py index d63aa8c..290ac27 100644 --- a/src/implementations/common_aruco_opencv.py +++ b/src/implementations/common_aruco_opencv.py @@ -682,6 +682,20 @@ def assign_key_value_list_to_aruco_detection_parameters( mismatched_keys.append(key_value.key) return mismatched_keys + @staticmethod + def standard_aruco_dictionary() -> cv2.aruco.Dictionary: + return cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100) + + @staticmethod + def standard_aruco_detection_parameters() -> cv2.aruco.DetectorParameters: + aruco_detector_parameters: cv2.aruco.DetectorParameters = cv2.aruco.DetectorParameters() + # TODO + # Curiously and counterintuitively, at least in testing with extrinsic calibration, + # it looks like no corner refinement may yield more accurate results than subpixel. + # More investigation seems warranted. 
+ # aruco_detector_parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX + return aruco_detector_parameters + @staticmethod def target_from_marker_parameters( base_label : str, diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 8eee2f4..4005341 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -150,8 +150,9 @@ def _calculate_implementation( image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] ) -> tuple[ExtrinsicCalibration, list[ExtrinsicCalibrator.ImageMetadata]]: charuco_spec: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() - aruco_detector_parameters: cv2.aruco.DetectorParameters = cv2.aruco.DetectorParameters() - aruco_dictionary: cv2.aruco.Dictionary = charuco_spec.aruco_dictionary() + aruco_detector_parameters: cv2.aruco.DetectorParameters = \ + ArucoOpenCVCommon.standard_aruco_detection_parameters() + aruco_dictionary: cv2.aruco.Dictionary = ArucoOpenCVCommon.standard_aruco_dictionary() charuco_target: Target = charuco_spec.as_target(target_label="board") # Populate _CalibrationData structure, including detection of annotations diff --git a/src/implementations/intrinsic_charuco_opencv.py b/src/implementations/intrinsic_charuco_opencv.py index 07488a4..6ad0e66 100644 --- a/src/implementations/intrinsic_charuco_opencv.py +++ b/src/implementations/intrinsic_charuco_opencv.py @@ -18,7 +18,8 @@ def _calculate_implementation( image_resolution: ImageResolution, image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] ) -> tuple[IntrinsicCalibration, list[IntrinsicCalibrator.ImageMetadata]]: - aruco_detector_parameters: ... = cv2.aruco.DetectorParameters() + aruco_detector_parameters: cv2.aruco.DetectorParameters = \ + ArucoOpenCVCommon.standard_aruco_detection_parameters() # mismatched_keys: list[str] = ArucoOpenCVAnnotator.assign_key_value_list_to_aruco_detection_parameters( # detection_parameters=aruco_detector_parameters, From 071fcc4934dccf5dd2c8b2397bf869ae98de7c93 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 20 Aug 2025 17:15:49 -0400 Subject: [PATCH 21/33] MNT: Expose ExtrinsicCalibrator functionality in Mixer --- src/common/__init__.py | 5 +- src/common/calibration.py | 87 +++-- src/common/mct_component.py | 2 +- src/{pose_solver => common}/pose_solver.py | 159 ++++++-- src/controller/connection.py | 36 +- src/controller/mct_controller.py | 30 +- src/detector/__init__.py | 41 +- src/detector/api.py | 366 ++++++++---------- src/detector/{detector_app.py => app.py} | 33 +- src/detector/detector.py | 270 ++++++++----- src/gui/panels/board_builder_panel.py | 12 +- src/gui/panels/calibrator_panel.py | 80 ++-- src/gui/panels/detector_panel.py | 10 +- src/gui/panels/pose_solver_panel.py | 8 +- .../extrinsic_charuco_opencv.py | 5 +- src/main_detector.py | 2 +- src/{pose_solver => mixer}/__init__.py | 8 +- src/mixer/api.py | 354 +++++++++++++++++ .../pose_solver_app.py => mixer/app.py} | 33 +- src/mixer/mixer.py | 362 +++++++++++++++++ src/pose_solver/api.py | 152 -------- src/pose_solver/pose_solver_api.py | 181 --------- src/pose_solver/structures.py | 114 ------ test/test_extrinsic_calibration.py | 6 +- test/test_pose_solver.py | 117 +++--- 25 files changed, 1441 insertions(+), 1032 deletions(-) rename src/{pose_solver => common}/pose_solver.py (79%) rename src/detector/{detector_app.py => app.py} (89%) rename src/{pose_solver => mixer}/__init__.py (69%) create mode 100644 src/mixer/api.py rename 
src/{pose_solver/pose_solver_app.py => mixer/app.py} (75%) create mode 100644 src/mixer/mixer.py delete mode 100644 src/pose_solver/api.py delete mode 100644 src/pose_solver/pose_solver_api.py delete mode 100644 src/pose_solver/structures.py diff --git a/src/common/__init__.py b/src/common/__init__.py index 3cb8897..686dbea 100644 --- a/src/common/__init__.py +++ b/src/common/__init__.py @@ -43,7 +43,10 @@ from .mct_component import \ DetectorFrame, \ MCTComponent, \ - PoseSolverFrame + MixerFrame +from .pose_solver import \ + PoseSolver, \ + PoseSolverException from .serialization import \ IOUtils, \ KeyValueSimpleAbstract, \ diff --git a/src/common/calibration.py b/src/common/calibration.py index 5ffbd6a..b777ae8 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -1,3 +1,5 @@ +from urllib import request + from .image_processing import \ ImageFormat, \ ImageResolution, \ @@ -234,6 +236,12 @@ def _exists_on_filesystem( private_message=errors[_PRIVATE_MESSAGE_KEY]) return return_value + def get_image_by_identifier( + self, + identifier: str + ) -> str: + return self._load_image(identifier=identifier) + # noinspection DuplicatedCode def _get_result_metadata_by_identifier( self, @@ -306,7 +314,7 @@ def _load_dict_from_filepath( return json_dict # noinspection DuplicatedCode - def load_image( + def _load_image( self, identifier: str ) -> str: # image in base64 @@ -423,27 +431,27 @@ def update_image_metadata( # noinspection DuplicatedCode def update_result_metadata( self, - identifier: str, - state: _ResultState, + result_identifier: str, + result_state: _ResultState, result_label: str | None = None ) -> None: match_count: int = 0 matched_metadata: _ResultMetadata | None = None for metadata in self._data_ledger.result_metadata_list: - if metadata.identifier == identifier: + if metadata.identifier == result_identifier: match_count += 1 matched_metadata = metadata if match_count < 1: raise MCTCalibrationError( reason=CalibrationErrorReason.DATA_NOT_FOUND, - private_message=f"Identifier {identifier} is not associated with any result.") + private_message=f"Identifier {result_identifier} is not associated with any result.") elif match_count > 1: raise MCTCalibrationError( reason=CalibrationErrorReason.INVALID_STATE, - private_message=f"Identifier {identifier} is associated with multiple results. " + private_message=f"Identifier {result_identifier} is associated with multiple results. " "This suggests that the data ledger is in an inconsistent state. 
" "Please manually correct it, or recreate it.") - matched_metadata.state = state + matched_metadata.state = result_state if result_label is not None: matched_metadata.result_label = result_label self._save_data_ledger() @@ -548,8 +556,8 @@ def calculate( # For now, assume that the user's intent is to set any new calibration to be the active one self.update_result_metadata( - identifier=result_metadata.identifier, - state=_ResultState.ACTIVE) + result_identifier=result_metadata.identifier, + result_state=_ResultState.ACTIVE) return result_identifier, intrinsic_calibration @@ -578,6 +586,7 @@ def get_result_active_by_image_resolution( for result_metadata in self._data_ledger.result_metadata_list: if result_metadata.state == _ResultState.ACTIVE and result_metadata.resolution == image_resolution: matched_metadata = result_metadata + match_count += 1 if match_count < 1: raise MCTCalibrationError( @@ -591,9 +600,6 @@ def get_result_active_by_image_resolution( "To recover from this ambiguous state, explicitly set " "one of the results as \"active\", which will reset others to \"retain\".") - if matched_metadata is None: - return None - return self._load_result_by_metadata( metadata=matched_metadata, result_type=IntrinsicCalibration) @@ -625,18 +631,18 @@ def list_result_metadata_by_image_resolution( def update_result_metadata( self, - identifier: str, - state: ResultState, + result_identifier: str, + result_state: ResultState, result_label: str | None = None ) -> None: super().update_result_metadata( - identifier=identifier, - state=state, + result_identifier=result_identifier, + result_state=result_state, result_label=result_label) # Some cleanup as applicable - if state == _ResultState.ACTIVE: - matching_metadata: _ResultMetadata = self._get_result_metadata_by_identifier(identifier=identifier) + if result_state == _ResultState.ACTIVE: + matching_metadata: _ResultMetadata = self._get_result_metadata_by_identifier(identifier=result_identifier) for metadata in self._data_ledger.result_metadata_list: if metadata.resolution == matching_metadata.resolution and \ metadata.identifier != matching_metadata.identifier: @@ -667,12 +673,15 @@ class ExtrinsicCalibrator(AbstractCalibrator, abc.ABC): ResultState: type[_ResultState] = _ResultState ResultMetadata: type[_ResultMetadata] = _ResultMetadata + detector_intrinsics_by_label: dict[str, IntrinsicParameters] + def __init__( self, configuration: Configuration | dict ): if isinstance(configuration, dict): configuration = ExtrinsicCalibrator.Configuration(**configuration) + self.detector_intrinsics_by_label = dict() super().__init__(configuration=configuration) # noinspection DuplicatedCode @@ -698,8 +707,7 @@ def add_image( return metadata.identifier def calculate( - self, - detector_intrinsics_by_label: dict[str, IntrinsicParameters] + self ) -> tuple[str, ExtrinsicCalibration]: """ :returns: a tuple containing a result identifier (GUID as string) and the ExtrinsicCalibration structure @@ -741,7 +749,6 @@ def calculate( public_message=f"No images found for calibration.") extrinsic_calibration, image_metadata_list = self._calculate_implementation( - detector_intrinsics_by_label=detector_intrinsics_by_label, image_metadata_list=image_metadata_list) result_identifier: str = str(uuid.uuid4()) @@ -756,15 +763,21 @@ def calculate( # For now, assume that the user's intent is to set any new calibration to be the active one self.update_result_metadata( - identifier=result_metadata.identifier, - state=_ResultState.ACTIVE) + 
result_identifier=result_metadata.identifier, + result_state=_ResultState.ACTIVE) return result_identifier, extrinsic_calibration + def intrinsic_parameters_update( + self, + detector_label: str, + intrinsic_parameters: IntrinsicParameters + ) -> None: + self.detector_intrinsics_by_label[detector_label] = intrinsic_parameters + @abc.abstractmethod def _calculate_implementation( self, - detector_intrinsics_by_label: dict[str, IntrinsicParameters], image_metadata_list: list[ImageMetadata] ): pass @@ -776,3 +789,29 @@ def get_result( return self._load_result( identifier=result_identifier, result_type=ExtrinsicCalibration) + + def get_result_active( + self + ) -> Optional[ExtrinsicCalibration]: + match_count: int = 0 + matched_metadata: ExtrinsicCalibrator.ResultMetadata | None = None + for result_metadata in self._data_ledger.result_metadata_list: + if result_metadata.state == _ResultState.ACTIVE: + matched_metadata = result_metadata + match_count += 1 + + if match_count < 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.DATA_NOT_FOUND, + public_message=f"No result metadata is active. " + "Please ensure one has been selected, then try again.") + if match_count > 1: + raise MCTCalibrationError( + reason=CalibrationErrorReason.INVALID_STATE, + public_message=f"Multiple result metadata are active. " + "To recover from this ambiguous state, explicitly set " + "one of the results as \"active\", which will reset others to \"retain\".") + + return self._load_result_by_metadata( + metadata=matched_metadata, + result_type=ExtrinsicCalibration) diff --git a/src/common/mct_component.py b/src/common/mct_component.py index 7e9a41d..7ab5513 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -62,7 +62,7 @@ def timestamp_utc(self): return datetime.datetime.fromisoformat(self.timestamp_utc_iso8601) -class PoseSolverFrame(BaseModel): +class MixerFrame(BaseModel): detector_poses: list[Pose] | None = Field() target_poses: list[Pose] | None = Field() timestamp_utc_iso8601: str = Field() diff --git a/src/pose_solver/pose_solver.py b/src/common/pose_solver.py similarity index 79% rename from src/pose_solver/pose_solver.py rename to src/common/pose_solver.py index 310b8c1..34232da 100644 --- a/src/pose_solver/pose_solver.py +++ b/src/common/pose_solver.py @@ -1,22 +1,19 @@ -from .structures import \ - DetectorRecord, \ - PoseSolverParameters -from src.common import \ - Annotation, \ - DetectorFrame, \ +from .image_processing import Annotation +from .math import \ IntrinsicParameters, \ IterativeClosestPointParameters, \ MathUtils, \ Matrix4x4, \ - MCTError, \ Pose, \ Ray, \ Target +from .status import MCTError import cv2 import cv2.aruco import datetime import itertools import numpy +from pydantic import BaseModel, Field from scipy.spatial.transform import Rotation from typing import Final, TypeVar @@ -28,6 +25,76 @@ ValueType = TypeVar("ValueType") +class _DetectorRecord: + """ + Class whose purpose is to keep track of the latest position of each landmark (in annotation form) + for a single detector.
+ """ + + class TimestampedAnnotation: + annotation: Annotation + timestamp_utc: datetime.datetime + def __init__( + self, + annotation: Annotation, + timestamp_utc: datetime.datetime + ): + self.annotation = annotation + self.timestamp_utc = timestamp_utc + + _timestamped_annotations: dict[str, TimestampedAnnotation] + + def __init__(self): + self._timestamped_annotations = dict() + + def add_frame_record( + self, + frame_annotations: list[Annotation], + frame_timestamp_utc: datetime.datetime + ) -> None: + for annotation in frame_annotations: + if annotation.feature_label not in self._timestamped_annotations: + self._timestamped_annotations[annotation.feature_label] = _DetectorRecord.TimestampedAnnotation( + annotation=annotation, + timestamp_utc=frame_timestamp_utc) + continue + timestamped_annotation: _DetectorRecord.TimestampedAnnotation = \ + self._timestamped_annotations[annotation.feature_label] + if frame_timestamp_utc > timestamped_annotation.timestamp_utc: + self._timestamped_annotations[annotation.feature_label] = _DetectorRecord.TimestampedAnnotation( + annotation=annotation, + timestamp_utc=frame_timestamp_utc) + + def clear_frame_records(self): + self._timestamped_annotations.clear() + + def clear_frame_records_older_than( + self, + timestamp_utc: datetime.datetime + ) -> bool: + """ + returns True if any changes were made + """ + feature_labels_to_remove: list[str] = list() + entry: _DetectorRecord.TimestampedAnnotation + for entry in self._timestamped_annotations.values(): + if entry.timestamp_utc < timestamp_utc: + feature_labels_to_remove.append(entry.annotation.feature_label) + if len(feature_labels_to_remove) <= 0: + return False + for feature_label in feature_labels_to_remove: + del self._timestamped_annotations[feature_label] + return True + + def get_annotations( + self, + deep_copy: bool = True + ) -> list[Annotation]: + if deep_copy: + return [entry.annotation.model_copy() for entry in self._timestamped_annotations.values()] + return [entry.annotation for entry in self._timestamped_annotations.values()] + + class PoseSolverException(MCTError): message: str @@ -41,20 +108,39 @@ class PoseSolver: Class containing the actual "solver" logic, kept separate from the API. 
""" + class Configuration(BaseModel): + minimum_detector_count: int = Field(default=2) + ray_intersection_maximum_distance: float = Field(default=10.0, description="millimeters") + icp_termination_iteration_count: int = Field(default=50) + icp_termination_translation: float = Field(default=0.005, description="millimeters") + icp_termination_rotation_radians: float = Field(default=0.0005) + icp_termination_mean_point_distance: float = Field(default=0.1, description="millimeters") + icp_termination_rms_point_distance: float = Field(default=0.1, description="millimeters") + + denoise_outlier_maximum_distance: float = Field(default=10.0) + denoise_outlier_maximum_angle_degrees: float = Field(default=5.0) + denoise_storage_size: int = Field(default=10) + denoise_filter_size: int = Field(default=7) + denoise_required_starting_streak: int = Field(default=3) + + # aruco_pose_estimator_method: int + # SOLVEPNP_ITERATIVE works okay but is susceptible to optical illusions (flipping) + # SOLVEPNP_P3P appears to return nan's on rare occasion + # SOLVEPNP_SQPNP appears to return nan's on rare occasion + # SOLVEPNP_IPPE_SQUARE does not seem to work very well at all, translation is much smaller than expected + # bookkeeping _last_change_timestamp_utc: datetime.datetime _last_updated_timestamp_utc: datetime.datetime # inputs - _parameters: PoseSolverParameters + _configuration: Configuration _intrinsics_by_detector_label: dict[str, IntrinsicParameters] _extrinsics_by_detector_label: dict[str, Matrix4x4] _targets: list[Target] # First target is considered the "reference" # input per frame - _detector_records_by_detector_label: dict[str, DetectorRecord] + _detector_records_by_detector_label: dict[str, _DetectorRecord] - # internal threshold - _minimum_marker_age_before_removal_seconds: float # use this to make sure each marker is associated uniquely to a single target _landmark_target_map: dict[str, Target] # Each marker shall be used at most once by a single target @@ -68,18 +154,12 @@ def __init__( self._last_change_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._last_updated_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) - self._parameters = PoseSolverParameters() + self._configuration = PoseSolver.Configuration() self._intrinsics_by_detector_label = dict() self._extrinsics_by_detector_label = dict() self._targets = list() self._detector_records_by_detector_label = dict() - self._minimum_marker_age_before_removal_seconds = max([ - self._parameters.POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) self._landmark_target_map = dict() self._poses_by_target_label = dict() @@ -88,12 +168,15 @@ def __init__( def add_detector_frame( self, detector_label: str, - detector_frame: DetectorFrame + frame_annotations: list[Annotation], + frame_timestamp_utc: datetime.datetime ) -> None: if detector_label not in self._detector_records_by_detector_label: - self._detector_records_by_detector_label[detector_label] = DetectorRecord() + self._detector_records_by_detector_label[detector_label] = _DetectorRecord() self._detector_records_by_detector_label[detector_label].clear_frame_records() - self._detector_records_by_detector_label[detector_label].add_frame_record(detector_frame) + 
self._detector_records_by_detector_label[detector_label].add_frame_record( + frame_annotations=frame_annotations, + frame_timestamp_utc=frame_timestamp_utc) self._last_change_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) def add_target( @@ -200,6 +283,7 @@ def _calculate_reprojection_error_for_pose( object_to_reference_rotation_quaternion: list[float] ) -> float: object_to_reference_matrix = numpy.identity(4, dtype="float32") + # noinspection PyArgumentList object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(object_to_reference_rotation_quaternion).as_matrix() object_to_reference_matrix[0:3, 3] = object_to_reference_translation object_points_reference = numpy.empty((len(object_points_target), 3), dtype="float32") @@ -211,6 +295,7 @@ def _calculate_reprojection_error_for_pose( detector_label: str = ray_set.detector_label reference_to_detector_matrix: Matrix4x4 = ray_set.detector_to_reference_matrix reference_to_detector: numpy.ndarray = reference_to_detector_matrix.as_numpy_array() + # noinspection PyArgumentList reference_to_detector_rotation_vector = \ Rotation.as_rotvec(Rotation.from_matrix(reference_to_detector[0:3, 0:3])) reference_to_detector_translation = reference_to_detector[0:3, 3] @@ -234,24 +319,24 @@ def _calculate_reprojection_error_for_pose( rms_reprojection_error = numpy.sqrt(mean_reprojection_errors_squared) return rms_reprojection_error - @staticmethod def _denoise_is_pose_pair_outlier( + self, pose_a_object_to_reference_matrix: numpy.ndarray, - pose_b_object_to_reference_matrix: numpy.ndarray, - parameters: PoseSolverParameters + pose_b_object_to_reference_matrix: numpy.ndarray ) -> bool: position_a = pose_a_object_to_reference_matrix[0:3, 3] position_b = pose_b_object_to_reference_matrix[0:3, 3] distance_millimeters = numpy.linalg.norm(position_a - position_b) - if distance_millimeters > parameters.DENOISE_OUTLIER_DISTANCE_MILLIMETERS: + if distance_millimeters > self._configuration.denoise_outlier_maximum_distance: return True orientation_a = pose_a_object_to_reference_matrix[0:3, 0:3] orientation_b = pose_b_object_to_reference_matrix[0:3, 0:3] rotation_a_to_b = numpy.matmul(orientation_a, numpy.linalg.inv(orientation_b)) + # noinspection PyArgumentList angle_degrees = numpy.linalg.norm(Rotation.from_matrix(rotation_a_to_b).as_rotvec()) - if angle_degrees > parameters.DENOISE_OUTLIER_DISTANCE_MILLIMETERS: + if angle_degrees > self._configuration.denoise_outlier_maximum_angle_degrees: return True return False @@ -264,15 +349,15 @@ def _denoise_detector_to_reference_pose( raw_poses: list[...] # In order, oldest to newest ) -> ...: most_recent_pose = raw_poses[-1] - max_storage_size: int = self._parameters.DENOISE_STORAGE_SIZE - filter_size: int = self._parameters.DENOISE_FILTER_SIZE + max_storage_size: int = self._configuration.denoise_storage_size + filter_size: int = self._configuration.denoise_filter_size if filter_size <= 1 or max_storage_size <= 1: return most_recent_pose # trivial case # find a consistent range of recent indices poses: list[...] 
= list(raw_poses) poses.reverse() # now they are sorted so that the first element is most recent - required_starting_streak: int = self._parameters.DENOISE_REQUIRED_STARTING_STREAK + required_starting_streak: int = self._configuration.denoise_required_starting_streak starting_index: int = -1 # not yet known, we want to find this if required_starting_streak <= 1: starting_index = 0 # trivial case @@ -304,11 +389,13 @@ def _denoise_detector_to_reference_pose( translations = [list(pose.object_to_reference_matrix[0:3, 3]) for pose in poses_to_average] + # noinspection PyArgumentList orientations = [list(Rotation.from_matrix(pose.object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) for pose in poses_to_average] filtered_translation = MathUtils.average_vector(translations) filtered_orientation = MathUtils.average_quaternion(orientations) filtered_object_to_reference_matrix = numpy.identity(4, dtype="float32") + # noinspection PyArgumentList filtered_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(filtered_orientation).as_matrix() filtered_object_to_reference_matrix[0:3, 3] = filtered_translation # return PoseData( @@ -393,7 +480,7 @@ def update(self) -> None: for feature_label, rays_by_detector_label in rays_by_feature_and_detector.items(): intersection_result = MathUtils.closest_intersection_between_n_lines( rays=list(rays_by_detector_label.values()), - maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) + maximum_distance=self._configuration.ray_intersection_maximum_distance) if intersection_result.centroids.shape[0] == 0: feature_labels_with_rays_only.append(feature_label) break @@ -420,7 +507,7 @@ def update(self) -> None: continue # No information on which to base a pose detector_count_seeing_target: int = len(detector_labels_seeing_target) - if detector_count_seeing_target < self._parameters.minimum_detector_count or \ + if detector_count_seeing_target < self._configuration.minimum_detector_count or \ detector_count_seeing_target <= 0: continue @@ -452,11 +539,11 @@ def update(self) -> None: list(rays_by_feature_and_detector[feature_label].values()) for feature_label in target_feature_labels_with_rays])) iterative_closest_point_parameters = IterativeClosestPointParameters( - termination_iteration_count=self._parameters.icp_termination_iteration_count, - termination_delta_translation=self._parameters.icp_termination_translation, - termination_delta_rotation_radians=self._parameters.icp_termination_rotation_radians, - termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, - termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) + termination_iteration_count=self._configuration.icp_termination_iteration_count, + termination_delta_translation=self._configuration.icp_termination_translation, + termination_delta_rotation_radians=self._configuration.icp_termination_rotation_radians, + termination_mean_point_distance=self._configuration.icp_termination_mean_point_distance, + termination_rms_point_distance=self._configuration.icp_termination_rms_point_distance) if len(target_feature_labels_with_intersections) >= 1: initial_detected_to_reference_matrix = MathUtils.register_corresponding_points( point_set_from=detected_known_points, diff --git a/src/controller/connection.py b/src/controller/connection.py index 696e8f7..41596c1 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -13,20 +13,20 @@ MCTResponse, \ MCTResponseSeries, \ Pose, \ - PoseSolverFrame, \ + MixerFrame, \ 
SeverityLabel, \ StatusMessage, \ Target, \ TimestampGetResponse from src.detector.api import \ - CalibrationCalculateResponse, \ - CalibrationImageAddResponse, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListResponse, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListResponse, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListResponse, \ CameraImageGetResponse, \ CameraParametersGetResponse, \ CameraParametersSetRequest, \ @@ -645,14 +645,14 @@ def handle_initialization_response_series( def supported_response_types(self) -> list[type[MCTResponse]]: return super().supported_response_types() + [ - CalibrationCalculateResponse, - CalibrationImageAddResponse, - CalibrationImageGetResponse, - CalibrationImageMetadataListResponse, - CalibrationResolutionListResponse, - CalibrationResultGetResponse, - CalibrationResultGetActiveResponse, - CalibrationResultMetadataListResponse, + IntrinsicCalibrationCalculateResponse, + IntrinsicCalibrationImageAddResponse, + IntrinsicCalibrationImageGetResponse, + IntrinsicCalibrationImageMetadataListResponse, + IntrinsicCalibrationResolutionListResponse, + IntrinsicCalibrationResultGetResponse, + IntrinsicCalibrationResultGetActiveResponse, + IntrinsicCalibrationResultMetadataListResponse, CameraImageGetResponse, CameraParametersGetResponse, CameraParametersSetResponse, @@ -673,7 +673,7 @@ class PoseSolverConnection(Connection): target_poses: list[Pose] detector_timestamps: dict[str, datetime.datetime] # access by detector_label poses_timestamp: datetime.datetime - recording: list[PoseSolverFrame] | None + recording: list[MixerFrame] | None def __init__( self, diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index eae1017..647a212 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -17,7 +17,7 @@ MCTRequestSeries, \ MCTResponse, \ MCTResponseSeries, \ - PoseSolverFrame, \ + MixerFrame, \ SeverityLabel, \ StatusMessageSource, \ TimestampGetRequest, \ @@ -25,15 +25,15 @@ TimeSyncStartRequest, \ TimeSyncStopRequest from src.detector import \ - CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ CameraResolutionGetRequest, \ CameraResolutionGetResponse, \ Detector, \ DetectorFrameGetRequest, \ DetectorFrameGetResponse from src.pose_solver import \ - PoseSolverAPI, \ + MixerBackend, \ PoseSolverAddDetectorFrameRequest, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ @@ -58,7 +58,7 @@ _ROLE_LABEL: Final[str] = "controller" _SUPPORTED_ROLES: Final[list[str]] = [ Detector.get_role_label(), - PoseSolverAPI.get_role_label()] + MixerBackend.get_role_label()] _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: Final[int] = 5 @@ -171,7 +171,7 @@ def add_connection( return_value: DetectorConnection = DetectorConnection(component_address=component_address) self._connections[label] = return_value return return_value - elif component_address.role == PoseSolverAPI.get_role_label(): + elif component_address.role == MixerBackend.get_role_label(): return_value: PoseSolverConnection = 
PoseSolverConnection(component_address=component_address) self._connections[label] = return_value return return_value @@ -226,7 +226,7 @@ def _advance_startup_state(self) -> None: request_series: MCTRequestSeries = MCTRequestSeries( series=[ CameraResolutionGetRequest(), - CalibrationResultGetActiveRequest()]) + IntrinsicCalibrationResultGetActiveRequest()]) self._pending_request_ids.append(self.request_series_push( connection_label=detector_label, request_series=request_series)) @@ -284,7 +284,7 @@ def get_active_pose_solver_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=PoseSolverAPI.get_role_label(), active=True) + return self.get_component_labels(role=MixerBackend.get_role_label(), active=True) def get_component_labels( self, @@ -356,7 +356,7 @@ def get_live_detector_frame( def get_live_pose_solver_frame( self, pose_solver_label: str - ) -> PoseSolverFrame | None: + ) -> MixerFrame | None: """ returns None if the pose solver does not exist, or has not been started, or if it has not yet gotten frames. """ @@ -365,7 +365,7 @@ def get_live_pose_solver_frame( connection_type=PoseSolverConnection) if pose_solver_connection is None: return None - return PoseSolverFrame( + return MixerFrame( detector_poses=pose_solver_connection.detector_poses, target_poses=pose_solver_connection.target_poses, timestamp_utc_iso8601=pose_solver_connection.poses_timestamp.isoformat()) @@ -387,7 +387,7 @@ def handle_error_response( def handle_response_calibration_result_get_active( self, - response: CalibrationResultGetActiveResponse, + response: IntrinsicCalibrationResultGetActiveResponse, detector_label: str ) -> None: detector_connection: DetectorConnection = self._get_connection( @@ -527,7 +527,7 @@ def handle_response_series( success: bool = True response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationResultGetActiveResponse): + if isinstance(response, IntrinsicCalibrationResultGetActiveResponse): self.handle_response_calibration_result_get_active( response=response, detector_label=response_series.responder) @@ -589,7 +589,7 @@ def recording_stop(self): # Do not record if specified if report.role == Detector.get_role_label() and not self._recording_detector: continue - if report.role == PoseSolverAPI.get_role_label() and not self._recording_pose_solver: + if report.role == MixerBackend.get_role_label() and not self._recording_pose_solver: continue if self._recording_save_path is not None: @@ -694,7 +694,7 @@ def start_up( raise RuntimeError("Cannot start up if controller isn't first stopped.") for connection in self._connections.values(): if mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == PoseSolverAPI.get_role_label(): + connection.get_role() == MixerBackend.get_role_label(): continue connection.start_up() @@ -732,7 +732,7 @@ def update( all_connected: bool = True for connection in connections: if self._startup_mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == PoseSolverAPI.get_role_label(): + connection.get_role() == MixerBackend.get_role_label(): continue if not connection.is_start_up_finished(): all_connected = False diff --git a/src/detector/__init__.py b/src/detector/__init__.py index 67a4432..8839013 100644 --- a/src/detector/__init__.py +++ b/src/detector/__init__.py @@ -1,23 +1,23 @@ from .api import \ - CalibrationCalculateRequest, \ - CalibrationCalculateResponse, \ - CalibrationDeleteStagedRequest, \ - CalibrationImageAddRequest, \ - CalibrationImageAddResponse, 
\ - CalibrationImageGetRequest, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListRequest, \ - CalibrationImageMetadataListResponse, \ - CalibrationImageMetadataUpdateRequest, \ - CalibrationResolutionListRequest, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetRequest, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListRequest, \ - CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest, \ + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + IntrinsicCalibrationResultMetadataListResponse, \ + IntrinsicCalibrationResultMetadataUpdateRequest, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetRequest, \ @@ -34,5 +34,4 @@ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest from .detector import \ - Detector, \ - DetectorConfiguration + Detector diff --git a/src/detector/api.py b/src/detector/api.py index 9ba82db..319bf80 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -9,436 +9,402 @@ MCTRequest, \ MCTResponse from pydantic import Field, SerializeAsAny -from typing import Final, Literal +from typing import Final class AnnotatorParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" + _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_get" @staticmethod def type_identifier() -> str: return AnnotatorParametersGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) class AnnotatorParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_get" + _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_get" @staticmethod def type_identifier() -> str: return AnnotatorParametersGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() class AnnotatorParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_marker_parameters_set" + _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_set" @staticmethod def type_identifier() -> str: return AnnotatorParametersSetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() -class CalibrationCalculateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" +class 
CameraImageGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" @staticmethod def type_identifier() -> str: - return CalibrationCalculateRequest._TYPE_IDENTIFIER + return CameraImageGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - image_resolution: ImageResolution = Field() + format: ImageFormat = Field() + requested_resolution: ImageResolution | None = Field(default=None) -class CalibrationCalculateResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_calculate" +class CameraImageGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" @staticmethod def type_identifier() -> str: - return CalibrationCalculateResponse._TYPE_IDENTIFIER + return CameraImageGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - result_identifier: str = Field() - intrinsic_calibration: IntrinsicCalibration = Field() + format: ImageFormat = Field() + image_base64: str = Field() -class CalibrationDeleteStagedRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_delete_staged" +class CameraParametersGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" @staticmethod def type_identifier() -> str: - return CalibrationDeleteStagedRequest._TYPE_IDENTIFIER + return CameraParametersGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class CalibrationImageAddRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" +class CameraParametersGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" @staticmethod def type_identifier() -> str: - return CalibrationImageAddRequest._TYPE_IDENTIFIER + return CameraParametersGetResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() -class CalibrationImageAddResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_add" +class CameraParametersSetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" @staticmethod def type_identifier() -> str: - return CalibrationImageAddResponse._TYPE_IDENTIFIER + return CameraParametersSetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - image_identifier: str = Field() + parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() -class CalibrationImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" +class CameraParametersSetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" @staticmethod def type_identifier() -> str: - return CalibrationImageGetRequest._TYPE_IDENTIFIER + return CameraParametersSetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = 
Field(default=_TYPE_IDENTIFIER) - image_identifier: str = Field() + resolution: ImageResolution = Field() # Sometimes parameter changes may result in changes of resolution -class CalibrationImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_get" +class CameraResolutionGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" @staticmethod def type_identifier() -> str: - return CalibrationImageGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + return CameraResolutionGetRequest._TYPE_IDENTIFIER - image_base64: str = Field() + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class CalibrationImageMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" +class CameraResolutionGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" @staticmethod def type_identifier() -> str: - return CalibrationImageMetadataListRequest._TYPE_IDENTIFIER + return CameraResolutionGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - image_resolution: ImageResolution = Field() + resolution: ImageResolution = Field() -class CalibrationImageMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_list" +class DetectorFrameGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" @staticmethod def type_identifier() -> str: - return CalibrationImageMetadataListResponse._TYPE_IDENTIFIER + return DetectorFrameGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - metadata_list: list[IntrinsicCalibrator.ImageMetadata] = Field(default_factory=list) + include_detected: bool = Field(default=True) + include_rejected: bool = Field(default=True) -class CalibrationImageMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_image_metadata_update" +class DetectorFrameGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" @staticmethod def type_identifier() -> str: - return CalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER + return DetectorFrameGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - image_identifier: str = Field() - image_state: IntrinsicCalibrator.ImageState = Field() - image_label: str | None = Field(default=None) + frame: DetectorFrame = Field() -class CalibrationResolutionListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" +class DetectorStartRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_start" @staticmethod def type_identifier() -> str: - return CalibrationResolutionListRequest._TYPE_IDENTIFIER + return DetectorStartRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class CalibrationResolutionListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_detector_resolutions_list" +class 
DetectorStopRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_stop" @staticmethod def type_identifier() -> str: - return CalibrationResolutionListResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + return DetectorStopRequest._TYPE_IDENTIFIER - resolutions: list[ImageResolution] = Field() + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class CalibrationResultGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" +class IntrinsicCalibrationCalculateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_calculate" @staticmethod def type_identifier() -> str: - return CalibrationResultGetRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationCalculateRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - result_identifier: str = Field() + image_resolution: ImageResolution = Field() -class CalibrationResultGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_get" +class IntrinsicCalibrationCalculateResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_calculate" @staticmethod def type_identifier() -> str: - return CalibrationResultGetResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationCalculateResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + result_identifier: str = Field() intrinsic_calibration: IntrinsicCalibration = Field() -class CalibrationResultGetActiveRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" +class IntrinsicCalibrationDeleteStagedRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_delete_staged" @staticmethod def type_identifier() -> str: - return CalibrationResultGetActiveRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationDeleteStagedRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class CalibrationResultGetActiveResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_active_get" +class IntrinsicCalibrationImageAddRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_add" @staticmethod def type_identifier() -> str: - return CalibrationResultGetActiveResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationImageAddRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - intrinsic_calibration: IntrinsicCalibration | None = Field() - -class CalibrationResultMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" +class IntrinsicCalibrationImageAddResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_add" @staticmethod def type_identifier() -> str: - return CalibrationResultMetadataListRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationImageAddResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = 
Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - image_resolution: ImageResolution = Field() + image_identifier: str = Field() -class CalibrationResultMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_list" +class IntrinsicCalibrationImageGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_get" @staticmethod def type_identifier() -> str: - return CalibrationResultMetadataListResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationImageGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - metadata_list: list[IntrinsicCalibrator.ResultMetadata] = Field(default_factory=list) + image_identifier: str = Field() -class CalibrationResultMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_calibration_result_metadata_update" +class IntrinsicCalibrationImageGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_get" @staticmethod def type_identifier() -> str: - return CalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationImageGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - result_identifier: str = Field() - result_state: IntrinsicCalibrator.ResultState = Field() - result_label: str | None = Field(default=None) + image_base64: str = Field() -class CameraImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" +class IntrinsicCalibrationImageMetadataListRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_list" @staticmethod def type_identifier() -> str: - return CameraImageGetRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationImageMetadataListRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - format: ImageFormat = Field() - requested_resolution: ImageResolution | None = Field(default=None) + image_resolution: ImageResolution = Field() -class CameraImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" +class IntrinsicCalibrationImageMetadataListResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_list" @staticmethod def type_identifier() -> str: - return CameraImageGetResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationImageMetadataListResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - format: ImageFormat = Field() - image_base64: str = Field() + metadata_list: list[IntrinsicCalibrator.ImageMetadata] = Field(default_factory=list) -class CameraParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" +class IntrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_update" @staticmethod def type_identifier() -> str: - return CameraParametersGetRequest._TYPE_IDENTIFIER + return 
IntrinsicCalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + image_identifier: str = Field() + image_state: IntrinsicCalibrator.ImageState = Field() + image_label: str | None = Field(default=None) -class CameraParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" + +class IntrinsicCalibrationResolutionListRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_detector_resolutions_list" @staticmethod def type_identifier() -> str: - return CameraParametersGetResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationResolutionListRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() - -class CameraParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" +class IntrinsicCalibrationResolutionListResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_detector_resolutions_list" @staticmethod def type_identifier() -> str: - return CameraParametersSetRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationResolutionListResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() + resolutions: list[ImageResolution] = Field() -class CameraParametersSetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" +class IntrinsicCalibrationResultGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_get" @staticmethod def type_identifier() -> str: - return CameraParametersSetResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationResultGetRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - resolution: ImageResolution = Field() # Sometimes parameter changes may result in changes of resolution + result_identifier: str = Field() -class CameraResolutionGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" +class IntrinsicCalibrationResultGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_get" @staticmethod def type_identifier() -> str: - return CameraResolutionGetRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationResultGetResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + intrinsic_calibration: IntrinsicCalibration = Field() -class CameraResolutionGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" + +class IntrinsicCalibrationResultGetActiveRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_active_get" @staticmethod def type_identifier() -> str: - return CameraResolutionGetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + return 
IntrinsicCalibrationResultGetActiveRequest._TYPE_IDENTIFIER - resolution: ImageResolution = Field() + parsable_type: str = Field(default=_TYPE_IDENTIFIER) -class DetectorFrameGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" +class IntrinsicCalibrationResultGetActiveResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_active_get" @staticmethod def type_identifier() -> str: - return DetectorFrameGetRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationResultGetActiveResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - include_detected: bool = Field(default=True) - include_rejected: bool = Field(default=True) + intrinsic_calibration: IntrinsicCalibration = Field() -class DetectorFrameGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" +class IntrinsicCalibrationResultMetadataListRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_list" @staticmethod def type_identifier() -> str: - return DetectorFrameGetResponse._TYPE_IDENTIFIER + return IntrinsicCalibrationResultMetadataListRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) - frame: DetectorFrame = Field() + image_resolution: ImageResolution = Field() -class DetectorStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_start" +class IntrinsicCalibrationResultMetadataListResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_list" @staticmethod def type_identifier() -> str: - return DetectorStartRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationResultMetadataListResponse._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + metadata_list: list[IntrinsicCalibrator.ResultMetadata] = Field(default_factory=list) -class DetectorStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_stop" + +class IntrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_update" @staticmethod def type_identifier() -> str: - return DetectorStopRequest._TYPE_IDENTIFIER + return IntrinsicCalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + result_identifier: str = Field() + result_state: IntrinsicCalibrator.ResultState = Field() + result_label: str | None = Field(default=None) diff --git a/src/detector/detector_app.py b/src/detector/app.py similarity index 89% rename from src/detector/detector_app.py rename to src/detector/app.py index d9f27e7..c36ce9d 100644 --- a/src/detector/detector_app.py +++ b/src/detector/app.py @@ -1,5 +1,5 @@ from .api import \ - CalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultGetActiveResponse, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetResponse, \ @@ -9,8 +9,7 @@ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest from .detector import \ - Detector, \ - DetectorConfiguration + Detector from src.common import \ Camera, \ 
Annotator, \ @@ -37,11 +36,11 @@ def create_app() -> FastAPI: detector_configuration_filepath: str = \ os.path.join(os.path.dirname(__file__), "..", "..", "data", "detector_config.json") - detector_configuration: DetectorConfiguration + detector_configuration: Detector.Configuration with open(detector_configuration_filepath, 'r') as infile: detector_configuration_file_contents: str = infile.read() detector_configuration_dict = hjson.loads(detector_configuration_file_contents) - detector_configuration = DetectorConfiguration(**detector_configuration_dict) + detector_configuration = Detector.Configuration(**detector_configuration_dict) # Eventually it would be preferable to put the initialization logic/mapping below into an abstract factory, # and allow end-users to register custom classes that are not necessarily shipped within this library. @@ -78,6 +77,17 @@ def create_app() -> FastAPI: allow_methods=["*"], allow_headers=["*"]) + @detector_app.get("/annotator/get_parameters") + async def annotator_get_parameters() -> AnnotatorParametersGetResponse | ErrorResponse: + return detector.annotator_parameters_get() + + @detector_app.post("/annotator/set_parameters") + async def annotator_set_parameters( + request: AnnotatorParametersSetRequest + ) -> EmptyResponse | ErrorResponse: + return detector.annotator_parameters_set( + request=request) + @detector_app.head("/detector/start") async def detector_start() -> None: detector.detector_start() @@ -114,7 +124,7 @@ async def get_timestamp( return detector.timestamp_get() @detector_app.get("/calibration/get_result_active") - async def calibration_get_result_active() -> CalibrationResultGetActiveResponse: + async def calibration_get_result_active() -> IntrinsicCalibrationResultGetActiveResponse: return detector.calibration_result_get_active() @detector_app.get("/camera/get_image") @@ -135,17 +145,6 @@ async def camera_get_parameters() -> CameraParametersGetResponse: async def camera_get_resolution() -> CameraResolutionGetResponse: return detector.camera_resolution_get() - @detector_app.get("/marker/get_parameters") - async def marker_get_parameters() -> AnnotatorParametersGetResponse | ErrorResponse: - return detector.marker_parameters_get() - - @detector_app.post("/marker/set_parameters") - async def marker_set_parameters( - request: AnnotatorParametersSetRequest - ) -> EmptyResponse | ErrorResponse: - return detector.marker_parameters_set( - request=request) - @detector_app.websocket("/websocket") async def websocket_handler(websocket: WebSocket) -> None: await detector.websocket_handler(websocket=websocket) diff --git a/src/detector/detector.py b/src/detector/detector.py index 9666d1e..933b2fc 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -2,25 +2,25 @@ AnnotatorParametersGetRequest, \ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest, \ - CalibrationCalculateRequest, \ - CalibrationCalculateResponse, \ - CalibrationDeleteStagedRequest, \ - CalibrationImageAddRequest, \ - CalibrationImageAddResponse, \ - CalibrationImageGetRequest, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListRequest, \ - CalibrationImageMetadataListResponse, \ - CalibrationImageMetadataUpdateRequest, \ - CalibrationResolutionListRequest, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetRequest, \ - CalibrationResultGetResponse, \ - CalibrationResultGetActiveRequest, \ - CalibrationResultGetActiveResponse, \ - CalibrationResultMetadataListRequest, \ - CalibrationResultMetadataListResponse, \ - 
CalibrationResultMetadataUpdateRequest, \ + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + IntrinsicCalibrationResultMetadataListResponse, \ + IntrinsicCalibrationResultMetadataUpdateRequest, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetRequest, \ @@ -62,18 +62,18 @@ _ROLE_LABEL: Final[str] = "detector" -class DetectorConfiguration(BaseModel): - """ - Top-level schema for Detector initialization data - """ - calibrator_configuration: IntrinsicCalibrator.Configuration = Field() - camera_configuration: Camera.Configuration = Field() - annotator_configuration: Annotator.Configuration = Field() - - +# noinspection DuplicatedCode class Detector(MCTComponent): - _detector_configuration: DetectorConfiguration + class Configuration(BaseModel): + """ + Top-level schema for Detector initialization data + """ + calibrator_configuration: IntrinsicCalibrator.Configuration = Field() + camera_configuration: Camera.Configuration = Field() + annotator_configuration: Annotator.Configuration = Field() + + _configuration: Configuration _calibrator: IntrinsicCalibrator _camera: Camera @@ -83,7 +83,7 @@ class Detector(MCTComponent): def __init__( self, - detector_configuration: DetectorConfiguration, + detector_configuration: Configuration, camera_type: type[Camera], annotator_type: type[Annotator] ): @@ -91,7 +91,7 @@ def __init__( status_source_label="detector", send_status_messages_to_logger=True) - self._detector_configuration = detector_configuration + self._configuration = detector_configuration self._calibrator = IntrinsicCalibrator( configuration=detector_configuration.calibrator_configuration) self._camera = camera_type( @@ -105,11 +105,38 @@ def __init__( def __del__(self): self._camera.__del__() - def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | ErrorResponse: - request: CalibrationCalculateRequest = self.get_kwarg( + def annotator_parameters_get( + self, + **_kwargs + ) -> AnnotatorParametersGetResponse | ErrorResponse: + try: + parameters = self._annotator.get_parameters() + except MCTAnnotatorRuntimeError as e: + return ErrorResponse(message=e.message) + return AnnotatorParametersGetResponse(parameters=parameters) + + def annotator_parameters_set( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: AnnotatorParametersSetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=AnnotatorParametersSetRequest) + try: + self._annotator.set_parameters(parameters=request.parameters) + except MCTAnnotatorRuntimeError as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def calibration_calculate( + self, + **kwargs + ) -> IntrinsicCalibrationCalculateResponse | ErrorResponse: + request: IntrinsicCalibrationCalculateRequest = self.get_kwarg( kwargs=kwargs, key="request", - 
arg_type=CalibrationCalculateRequest) + arg_type=IntrinsicCalibrationCalculateRequest) result_identifier: str intrinsic_calibration: IntrinsicCalibration try: @@ -118,11 +145,14 @@ def calibration_calculate(self, **kwargs) -> CalibrationCalculateResponse | Erro except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationCalculateResponse( + return IntrinsicCalibrationCalculateResponse( result_identifier=result_identifier, intrinsic_calibration=intrinsic_calibration) - def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def calibration_delete_staged( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._calibrator.delete_staged() except MCTCalibrationError as e: @@ -130,7 +160,10 @@ def calibration_delete_staged(self, **_kwargs) -> EmptyResponse | ErrorResponse: return ErrorResponse(message=e.public_message) return EmptyResponse() - def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | ErrorResponse: + def calibration_image_add( + self, + **_kwargs + ) -> IntrinsicCalibrationImageAddResponse | ErrorResponse: try: image_base64: str = self._camera.get_encoded_image( image_format=ImageFormat.FORMAT_PNG, @@ -139,26 +172,32 @@ def calibration_image_add(self, **_kwargs) -> CalibrationImageAddResponse | Erro except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationImageAddResponse(image_identifier=image_identifier) + return IntrinsicCalibrationImageAddResponse(image_identifier=image_identifier) - def calibration_image_get(self, **kwargs) -> CalibrationImageGetResponse | ErrorResponse: - request: CalibrationImageGetRequest = self.get_kwarg( + def calibration_image_get( + self, + **kwargs + ) -> IntrinsicCalibrationImageGetResponse | ErrorResponse: + request: IntrinsicCalibrationImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageGetRequest) + arg_type=IntrinsicCalibrationImageGetRequest) image_base64: str try: - image_base64 = self._calibrator.load_image(identifier=request.image_identifier) + image_base64 = self._calibrator.get_image_by_identifier(identifier=request.image_identifier) except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationImageGetResponse(image_base64=image_base64) + return IntrinsicCalibrationImageGetResponse(image_base64=image_base64) - def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataListResponse | ErrorResponse: - request: CalibrationImageMetadataListRequest = self.get_kwarg( + def calibration_image_metadata_list( + self, + **kwargs + ) -> IntrinsicCalibrationImageMetadataListResponse | ErrorResponse: + request: IntrinsicCalibrationImageMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageMetadataListRequest) + arg_type=IntrinsicCalibrationImageMetadataListRequest) image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] try: image_metadata_list = self._calibrator.list_image_metadata_by_image_resolution( @@ -166,13 +205,16 @@ def calibration_image_metadata_list(self, **kwargs) -> CalibrationImageMetadataL except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationImageMetadataListResponse(metadata_list=image_metadata_list) + return 
IntrinsicCalibrationImageMetadataListResponse(metadata_list=image_metadata_list) - def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationImageMetadataUpdateRequest = self.get_kwarg( + def calibration_image_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: IntrinsicCalibrationImageMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationImageMetadataUpdateRequest) + arg_type=IntrinsicCalibrationImageMetadataUpdateRequest) try: self._calibrator.update_image_metadata( image_identifier=request.image_identifier, @@ -183,29 +225,38 @@ def calibration_image_metadata_update(self, **kwargs) -> EmptyResponse | ErrorRe return ErrorResponse(message=e.public_message) return EmptyResponse() - def calibration_resolution_list(self, **_kwargs) -> CalibrationResolutionListResponse | ErrorResponse: + def calibration_resolution_list( + self, + **_kwargs + ) -> IntrinsicCalibrationResolutionListResponse | ErrorResponse: resolutions: list[ImageResolution] try: resolutions = self._calibrator.list_resolutions() except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationResolutionListResponse(resolutions=resolutions) + return IntrinsicCalibrationResolutionListResponse(resolutions=resolutions) - def calibration_result_get(self, **kwargs) -> CalibrationResultGetResponse | ErrorResponse: - request: CalibrationResultGetRequest = self.get_kwarg( + def calibration_result_get( + self, + **kwargs + ) -> IntrinsicCalibrationResultGetResponse | ErrorResponse: + request: IntrinsicCalibrationResultGetRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultGetRequest) + arg_type=IntrinsicCalibrationResultGetRequest) intrinsic_calibration: IntrinsicCalibration try: intrinsic_calibration = self._calibrator.get_result(result_identifier=request.result_identifier) except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) + return IntrinsicCalibrationResultGetResponse(intrinsic_calibration=intrinsic_calibration) - def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActiveResponse | ErrorResponse: + def calibration_result_get_active( + self, + **_kwargs + ) -> IntrinsicCalibrationResultGetActiveResponse | ErrorResponse: intrinsic_calibration: IntrinsicCalibration | None try: image_resolution: ImageResolution = self._camera.get_resolution() @@ -213,13 +264,16 @@ def calibration_result_get_active(self, **_kwargs) -> CalibrationResultGetActive except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) + return IntrinsicCalibrationResultGetActiveResponse(intrinsic_calibration=intrinsic_calibration) - def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadataListResponse | ErrorResponse: - request: CalibrationResultMetadataListRequest = self.get_kwarg( + def calibration_result_metadata_list( + self, + **kwargs + ) -> IntrinsicCalibrationResultMetadataListResponse | ErrorResponse: + request: IntrinsicCalibrationResultMetadataListRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultMetadataListRequest) + arg_type=IntrinsicCalibrationResultMetadataListRequest) 
result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] try: result_metadata_list = self._calibrator.list_result_metadata_by_image_resolution( @@ -227,24 +281,30 @@ def calibration_result_metadata_list(self, **kwargs) -> CalibrationResultMetadat except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) - return CalibrationResultMetadataListResponse(metadata_list=result_metadata_list) + return IntrinsicCalibrationResultMetadataListResponse(metadata_list=result_metadata_list) - def calibration_result_metadata_update(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: CalibrationResultMetadataUpdateRequest = self.get_kwarg( + def calibration_result_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: IntrinsicCalibrationResultMetadataUpdateRequest = self.get_kwarg( kwargs=kwargs, key="request", - arg_type=CalibrationResultMetadataUpdateRequest) + arg_type=IntrinsicCalibrationResultMetadataUpdateRequest) try: self._calibrator.update_result_metadata( - identifier=request.result_identifier, - state=request.result_state, + result_identifier=request.result_identifier, + result_state=request.result_state, result_label=request.result_label) except MCTCalibrationError as e: logger.error(e.private_message) return ErrorResponse(message=e.public_message) return EmptyResponse() - def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: + def camera_image_get( + self, + **kwargs + ) -> CameraImageGetResponse | ErrorResponse: request: CameraImageGetRequest = self.get_kwarg( kwargs=kwargs, key="request", @@ -260,7 +320,10 @@ def camera_image_get(self, **kwargs) -> CameraImageGetResponse | ErrorResponse: format=request.format, image_base64=encoded_image_base64) - def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | ErrorResponse: + def camera_parameters_get( + self, + **_kwargs + ) -> CameraParametersGetResponse | ErrorResponse: parameters: list[KeyValueMetaAbstract] try: parameters = self._camera.get_parameters() @@ -268,7 +331,10 @@ def camera_parameters_get(self, **_kwargs) -> CameraParametersGetResponse | Erro return ErrorResponse(message=e.message) return CameraParametersGetResponse(parameters=parameters) - def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | ErrorResponse: + def camera_parameters_set( + self, + **kwargs + ) -> CameraParametersSetResponse | ErrorResponse: request: CameraParametersSetRequest = self.get_kwarg( kwargs=kwargs, key="request", @@ -281,7 +347,10 @@ def camera_parameters_set(self, **kwargs) -> CameraParametersSetResponse | Error return ErrorResponse(message=e.message) return CameraParametersSetResponse(resolution=new_resolution) - def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | ErrorResponse: + def camera_resolution_get( + self, + **_kwargs + ) -> CameraResolutionGetResponse | ErrorResponse: image_resolution: ImageResolution try: image_resolution = self._camera.get_resolution() @@ -289,7 +358,10 @@ def camera_resolution_get(self, **_kwargs) -> CameraResolutionGetResponse | Erro return ErrorResponse(message=e.message) return CameraResolutionGetResponse(resolution=image_resolution) - def detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorResponse: + def detector_frame_get( + self, + **kwargs + ) -> DetectorFrameGetResponse | ErrorResponse: request: DetectorFrameGetRequest = self.get_kwarg( kwargs=kwargs, key="request", @@ -308,14 +380,20 @@ def 
detector_frame_get(self, **kwargs) -> DetectorFrameGetResponse | ErrorRespon return ErrorResponse(message=e.message) return DetectorFrameGetResponse(frame=detector_frame) - def detector_start(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def detector_start( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._camera.start() except MCTCameraRuntimeError as e: return ErrorResponse(message=e.message) return EmptyResponse() - def detector_stop(self, **_kwargs) -> EmptyResponse | ErrorResponse: + def detector_stop( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: try: self._camera.stop() except MCTCameraRuntimeError as e: @@ -326,47 +404,29 @@ def detector_stop(self, **_kwargs) -> EmptyResponse | ErrorResponse: def get_role_label(): return _ROLE_LABEL - def marker_parameters_get(self, **_kwargs) -> AnnotatorParametersGetResponse | ErrorResponse: - try: - parameters = self._annotator.get_parameters() - except MCTAnnotatorRuntimeError as e: - return ErrorResponse(message=e.message) - return AnnotatorParametersGetResponse(parameters=parameters) - - def marker_parameters_set(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: AnnotatorParametersSetRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=AnnotatorParametersSetRequest) - try: - self._annotator.set_parameters(parameters=request.parameters) - except MCTAnnotatorRuntimeError as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() return_value.update({ DetectorFrameGetRequest: self.detector_frame_get, DetectorStartRequest: self.detector_start, DetectorStopRequest: self.detector_stop, - CalibrationCalculateRequest: self.calibration_calculate, - CalibrationDeleteStagedRequest: self.calibration_delete_staged, - CalibrationImageAddRequest: self.calibration_image_add, - CalibrationImageGetRequest: self.calibration_image_get, - CalibrationImageMetadataListRequest: self.calibration_image_metadata_list, - CalibrationImageMetadataUpdateRequest: self.calibration_image_metadata_update, - CalibrationResolutionListRequest: self.calibration_resolution_list, - CalibrationResultGetRequest: self.calibration_result_get, - CalibrationResultGetActiveRequest: self.calibration_result_get_active, - CalibrationResultMetadataListRequest: self.calibration_result_metadata_list, - CalibrationResultMetadataUpdateRequest: self.calibration_result_metadata_update, + IntrinsicCalibrationCalculateRequest: self.calibration_calculate, + IntrinsicCalibrationDeleteStagedRequest: self.calibration_delete_staged, + IntrinsicCalibrationImageAddRequest: self.calibration_image_add, + IntrinsicCalibrationImageGetRequest: self.calibration_image_get, + IntrinsicCalibrationImageMetadataListRequest: self.calibration_image_metadata_list, + IntrinsicCalibrationImageMetadataUpdateRequest: self.calibration_image_metadata_update, + IntrinsicCalibrationResolutionListRequest: self.calibration_resolution_list, + IntrinsicCalibrationResultGetRequest: self.calibration_result_get, + IntrinsicCalibrationResultGetActiveRequest: self.calibration_result_get_active, + IntrinsicCalibrationResultMetadataListRequest: self.calibration_result_metadata_list, + IntrinsicCalibrationResultMetadataUpdateRequest: self.calibration_result_metadata_update, CameraImageGetRequest: self.camera_image_get, CameraParametersGetRequest: 
self.camera_parameters_get, CameraParametersSetRequest: self.camera_parameters_set, CameraResolutionGetRequest: self.camera_resolution_get, - AnnotatorParametersGetRequest: self.marker_parameters_get, - AnnotatorParametersSetRequest: self.marker_parameters_set}) + AnnotatorParametersGetRequest: self.annotator_parameters_get, + AnnotatorParametersSetRequest: self.annotator_parameters_set}) return return_value async def update(self): diff --git a/src/gui/panels/board_builder_panel.py b/src/gui/panels/board_builder_panel.py index 234cedd..1b5f8a4 100644 --- a/src/gui/panels/board_builder_panel.py +++ b/src/gui/panels/board_builder_panel.py @@ -20,13 +20,13 @@ ImageUtils, \ Matrix4x4, \ Pose, \ - PoseSolverFrame, \ + MixerFrame, \ StatusMessageSource from src.controller import MCTController from src.detector.api import \ CameraImageGetRequest, \ CameraImageGetResponse, \ - CalibrationResultGetActiveResponse + IntrinsicCalibrationResultGetActiveResponse from src.gui.panels.detector_panel import _CAPTURE_FORMAT import cv2 import datetime @@ -82,7 +82,7 @@ def __init__( _tracked_target_poses: list[Pose] # This could maybe be added to the LiveDetectorPreview class - _latest_pose_solver_frames: dict[str, PoseSolverFrame] + _latest_pose_solver_frames: dict[str, MixerFrame] live_detector_previews: list[LiveDetectorPreview] def __init__( @@ -345,7 +345,7 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationResultGetActiveResponse): + if isinstance(response, IntrinsicCalibrationResultGetActiveResponse): self._handle_calibration_result_get_active_response( response=response, responder=response_series.responder) @@ -421,7 +421,7 @@ def _draw_all_corners(self, annotations, scale, frame, color): def _handle_calibration_result_get_active_response( self, - response: CalibrationResultGetActiveResponse, + response: IntrinsicCalibrationResultGetActiveResponse, responder: str ) -> None: if response: @@ -599,7 +599,7 @@ def _process_frame(self, preview: LiveDetectorPreview): image_panel.paint() def _render_frame(self, detector_poses, target_poses): - pose_solver_frame = PoseSolverFrame( + pose_solver_frame = MixerFrame( detector_poses=detector_poses, target_poses=target_poses, timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat() diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/calibrator_panel.py index c160a76..5aa16be 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/calibrator_panel.py @@ -21,21 +21,21 @@ from src.controller import \ MCTController from src.detector import \ - CalibrationCalculateRequest, \ - CalibrationCalculateResponse, \ - CalibrationDeleteStagedRequest, \ - CalibrationImageGetRequest, \ - CalibrationImageGetResponse, \ - CalibrationImageMetadataListRequest, \ - CalibrationImageMetadataListResponse, \ - CalibrationImageMetadataUpdateRequest, \ - CalibrationResolutionListRequest, \ - CalibrationResolutionListResponse, \ - CalibrationResultGetRequest, \ - CalibrationResultGetResponse, \ - CalibrationResultMetadataListRequest, \ - CalibrationResultMetadataListResponse, \ - CalibrationResultMetadataUpdateRequest + IntrinsicCalibrationCalculateRequest, \ + IntrinsicCalibrationCalculateResponse, \ + IntrinsicCalibrationDeleteStagedRequest, \ + IntrinsicCalibrationImageGetRequest, \ + IntrinsicCalibrationImageGetResponse, \ + IntrinsicCalibrationImageMetadataListRequest, \ + IntrinsicCalibrationImageMetadataListResponse, \ + 
IntrinsicCalibrationImageMetadataUpdateRequest, \ + IntrinsicCalibrationResolutionListRequest, \ + IntrinsicCalibrationResolutionListResponse, \ + IntrinsicCalibrationResultGetRequest, \ + IntrinsicCalibrationResultGetResponse, \ + IntrinsicCalibrationResultMetadataListRequest, \ + IntrinsicCalibrationResultMetadataListResponse, \ + IntrinsicCalibrationResultMetadataUpdateRequest from io import BytesIO import logging from typing import Optional @@ -278,17 +278,17 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationCalculateResponse): + if isinstance(response, IntrinsicCalibrationCalculateResponse): self._handle_response_calibrate(response=response) - elif isinstance(response, CalibrationImageGetResponse): + elif isinstance(response, IntrinsicCalibrationImageGetResponse): self._handle_response_get_calibration_image(response=response) - elif isinstance(response, CalibrationResultGetResponse): + elif isinstance(response, IntrinsicCalibrationResultGetResponse): self._handle_response_get_calibration_result(response=response) - elif isinstance(response, CalibrationResolutionListResponse): + elif isinstance(response, IntrinsicCalibrationResolutionListResponse): self._handle_response_list_calibration_detector_resolutions(response=response) - elif isinstance(response, CalibrationImageMetadataListResponse): + elif isinstance(response, IntrinsicCalibrationImageMetadataListResponse): self._handle_response_list_calibration_image_metadata(response=response) - elif isinstance(response, CalibrationResultMetadataListResponse): + elif isinstance(response, IntrinsicCalibrationResultMetadataListResponse): self._handle_response_list_calibration_result_metadata(response=response) elif isinstance(response, ErrorResponse): self.handle_error_response(response=response) @@ -320,7 +320,7 @@ def update_loop(self) -> None: def _handle_response_calibrate( self, - response: CalibrationCalculateResponse + response: IntrinsicCalibrationCalculateResponse ) -> None: if not self._calibration_in_progress: self.status_message_source.enqueue_status_message( @@ -336,7 +336,7 @@ def _handle_response_calibrate( def _handle_response_get_calibration_image( self, - response: CalibrationImageGetResponse + response: IntrinsicCalibrationImageGetResponse ) -> None: opencv_image = ImageUtils.base64_to_image(input_base64=response.image_base64) opencv_image = ImageUtils.image_resize_to_fit( @@ -351,13 +351,13 @@ def _handle_response_get_calibration_image( def _handle_response_get_calibration_result( self, - response: CalibrationResultGetResponse + response: IntrinsicCalibrationResultGetResponse ) -> None: self._result_display_textbox.SetValue(str(response.intrinsic_calibration.model_dump_json(indent=4))) def _handle_response_list_calibration_detector_resolutions( self, - response: CalibrationResolutionListResponse + response: IntrinsicCalibrationResolutionListResponse ) -> None: self._detector_resolutions = sorted(response.resolutions) self._detector_resolution_selector.set_options([str(res) for res in self._detector_resolutions]) @@ -365,14 +365,14 @@ def _handle_response_list_calibration_detector_resolutions( def _handle_response_list_calibration_image_metadata( self, - response: CalibrationImageMetadataListResponse + response: IntrinsicCalibrationImageMetadataListResponse ) -> None: self._image_metadata_list = response.metadata_list self._image_table.update_contents(row_contents=self._image_metadata_list) def 
_handle_response_list_calibration_result_metadata( self, - response: CalibrationResultMetadataListResponse + response: IntrinsicCalibrationResultMetadataListResponse ) -> None: self._result_metadata_list = response.metadata_list self._result_table.update_contents(row_contents=self._result_metadata_list) @@ -388,9 +388,9 @@ def _on_calibrate_pressed(self, _event: wx.CommandEvent) -> None: selected_image_resolution: ImageResolution = \ ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationCalculateRequest( + IntrinsicCalibrationCalculateRequest( image_resolution=selected_image_resolution), - CalibrationResultMetadataListRequest( + IntrinsicCalibrationResultMetadataListRequest( image_resolution=selected_image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, @@ -405,7 +405,7 @@ def _on_detector_selected(self, _event: wx.CommandEvent) -> None: self._calibrate_status_textbox.SetValue(str()) self._result_display_textbox.SetValue(str()) detector_label: str = self._detector_selector.selector.GetStringSelection() - request_series: MCTRequestSeries = MCTRequestSeries(series=[CalibrationResolutionListRequest()]) + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationResolutionListRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, request_series=request_series) @@ -420,9 +420,9 @@ def _on_detector_load_pressed(self, _event: wx.CommandEvent) -> None: selected_image_resolution: ImageResolution = \ ImageResolution.from_str(self._detector_resolution_selector.selector.GetStringSelection()) request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageMetadataListRequest( + IntrinsicCalibrationImageMetadataListRequest( image_resolution=selected_image_resolution), - CalibrationResultMetadataListRequest( + IntrinsicCalibrationResultMetadataListRequest( image_resolution=selected_image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, @@ -451,7 +451,7 @@ def _on_image_metadata_selected(self, _event: wx.grid.GridEvent) -> None: image_identifier: str | None = self._image_metadata_list[image_index].identifier if image_identifier is not None: request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageGetRequest(image_identifier=image_identifier)]) + IntrinsicCalibrationImageGetRequest(image_identifier=image_identifier)]) detector_label: str = self._detector_selector.selector.GetStringSelection() self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -469,12 +469,12 @@ def _on_image_update_pressed(self, _event: wx.CommandEvent) -> None: IntrinsicCalibrator.ImageState[self._image_state_selector.selector.GetStringSelection()] image_label: str = self._image_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationImageMetadataUpdateRequest( + IntrinsicCalibrationImageMetadataUpdateRequest( image_identifier=image_identifier, image_state=image_state, image_label=image_label), - CalibrationDeleteStagedRequest(), - CalibrationImageMetadataListRequest( + IntrinsicCalibrationDeleteStagedRequest(), + IntrinsicCalibrationImageMetadataListRequest( image_resolution=image_resolution)]) self._control_blocking_request_id = 
self._controller.request_series_push( connection_label=detector_label, @@ -489,7 +489,7 @@ def _on_result_metadata_selected(self, _event: wx.grid.GridEvent) -> None: result_identifier: str | None = self._result_metadata_list[result_index].identifier if result_identifier is not None: request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationResultGetRequest(result_identifier=result_identifier)]) + IntrinsicCalibrationResultGetRequest(result_identifier=result_identifier)]) detector_label: str = self._detector_selector.selector.GetStringSelection() self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, @@ -507,12 +507,12 @@ def _on_result_update_pressed(self, _event: wx.CommandEvent) -> None: IntrinsicCalibrator.ResultState[self._result_state_selector.selector.GetStringSelection()] result_label: str = self._result_label_textbox.textbox.GetValue() request_series: MCTRequestSeries = MCTRequestSeries(series=[ - CalibrationResultMetadataUpdateRequest( + IntrinsicCalibrationResultMetadataUpdateRequest( result_identifier=result_identifier, result_state=result_state, result_label=result_label), - CalibrationDeleteStagedRequest(), - CalibrationResultMetadataListRequest( + IntrinsicCalibrationDeleteStagedRequest(), + IntrinsicCalibrationResultMetadataListRequest( image_resolution=image_resolution)]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=detector_label, diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 1e2384c..8594714 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -23,8 +23,8 @@ from src.controller import \ MCTController from src.detector.api import \ - CalibrationImageAddRequest, \ - CalibrationImageAddResponse, \ + IntrinsicCalibrationImageAddRequest, \ + IntrinsicCalibrationImageAddResponse, \ CameraImageGetRequest, \ CameraImageGetResponse, \ CameraParametersGetRequest, \ @@ -278,7 +278,7 @@ def __init__( def begin_capture_calibration(self) -> None: selected_detector_label: str = self._detector_selector.selector.GetStringSelection() - request_series: MCTRequestSeries = MCTRequestSeries(series=[CalibrationImageAddRequest()]) + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationImageAddRequest()]) self._control_blocking_request_id = self._controller.request_series_push( connection_label=selected_detector_label, request_series=request_series) @@ -338,7 +338,7 @@ def handle_response_series( ) -> None: response: MCTResponse for response in response_series.series: - if isinstance(response, CalibrationImageAddResponse): + if isinstance(response, IntrinsicCalibrationImageAddResponse): self._handle_add_calibration_image_response(response=response) elif isinstance(response, CameraImageGetResponse): self._handle_capture_snapshot_response(response=response) @@ -353,7 +353,7 @@ def handle_response_series( def _handle_add_calibration_image_response( self, - response: CalibrationImageAddResponse + response: IntrinsicCalibrationImageAddResponse ): self.status_message_source.enqueue_status_message( severity="info", diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index 0e25508..423fa44 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -16,7 +16,7 @@ StatusMessageSource, \ SeverityLabel, \ Pose, \ - PoseSolverFrame + MixerFrame from src.controller import \ MCTController import datetime @@ -43,7 +43,7 @@ class 
PoseSolverPanel(BasePanel): _control_blocking_request_id: uuid.UUID | None _is_updating: bool _latest_detector_frames: dict[str, DetectorFrame] # last frame for each detector - _latest_pose_solver_frames: dict[str, PoseSolverFrame] + _latest_pose_solver_frames: dict[str, MixerFrame] _target_id_to_label: dict[str, str] _tracked_target_poses: list[Pose] @@ -214,11 +214,11 @@ def update_loop(self) -> None: new_poses_available: bool = False pose_solver_labels: list[str] = self._controller.get_active_pose_solver_labels() for pose_solver_label in pose_solver_labels: - retrieved_pose_solver_frame: PoseSolverFrame = self._controller.get_live_pose_solver_frame( + retrieved_pose_solver_frame: MixerFrame = self._controller.get_live_pose_solver_frame( pose_solver_label=pose_solver_label) retrieved_pose_solver_frame_timestamp: datetime.datetime = retrieved_pose_solver_frame.timestamp_utc() if pose_solver_label in self._latest_pose_solver_frames: - latest_pose_solver_frame: PoseSolverFrame = self._latest_pose_solver_frames[pose_solver_label] + latest_pose_solver_frame: MixerFrame = self._latest_pose_solver_frames[pose_solver_label] latest_pose_solver_frame_timestamp: datetime.datetime = latest_pose_solver_frame.timestamp_utc() if retrieved_pose_solver_frame_timestamp > latest_pose_solver_frame_timestamp: self._latest_pose_solver_frames[pose_solver_label] = retrieved_pose_solver_frame diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 4005341..9f86f1b 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -146,7 +146,6 @@ def _annotate_image( def _calculate_implementation( self, - detector_intrinsics_by_label: dict[str, IntrinsicParameters], image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] ) -> tuple[ExtrinsicCalibration, list[ExtrinsicCalibrator.ImageMetadata]]: charuco_spec: ArucoOpenCVCommon.CharucoBoard = ArucoOpenCVCommon.CharucoBoard() @@ -177,7 +176,7 @@ def _calculate_implementation( except IndexError: detector: _DetectorData = _DetectorData( detector_label=metadata.detector_label, - intrinsic_parameters=detector_intrinsics_by_label[metadata.detector_label]) + intrinsic_parameters=self.detector_intrinsics_by_label[metadata.detector_label]) data.detectors.append(detector) for annotation in annotations: try: @@ -197,7 +196,7 @@ def _calculate_implementation( image_data: _ImageData = data.get_image_container( timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, detector_label=metadata.detector_label) - intrinsic_parameters: IntrinsicParameters = detector_intrinsics_by_label[metadata.detector_label] + intrinsic_parameters: IntrinsicParameters = self.detector_intrinsics_by_label[metadata.detector_label] reference_to_initial: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=image_data.annotations, landmarks=charuco_target.landmarks, diff --git a/src/main_detector.py b/src/main_detector.py index 032b343..498e7bc 100644 --- a/src/main_detector.py +++ b/src/main_detector.py @@ -1,4 +1,4 @@ -from src.detector.detector_app import app +from src.detector.app import app import logging import uvicorn diff --git a/src/pose_solver/__init__.py b/src/mixer/__init__.py similarity index 69% rename from src/pose_solver/__init__.py rename to src/mixer/__init__.py index d7e596a..7523cbd 100644 --- a/src/pose_solver/__init__.py +++ b/src/mixer/__init__.py @@ -5,9 +5,9 @@ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ 
PoseSolverSetExtrinsicRequest, \ - PoseSolverSetIntrinsicRequest, \ + MixerUpdateIntrinsicParametersRequest, \ PoseSolverSetReferenceRequest, \ PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, \ - PoseSolverStopRequest -from.pose_solver_api import PoseSolverAPI + MixerStartRequest, \ + MixerStopRequest +from.mixer import Mixer diff --git a/src/mixer/api.py b/src/mixer/api.py new file mode 100644 index 0000000..9269465 --- /dev/null +++ b/src/mixer/api.py @@ -0,0 +1,354 @@ +from src.common import \ + DetectorFrame, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrator, \ + IntrinsicParameters, \ + Matrix4x4, \ + MCTRequest, \ + MCTResponse, \ + Pose, \ + Target +from pydantic import Field +from typing import Final, Literal + + +class ExtrinsicCalibrationCalculateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_calculate" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationCalculateRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + +class ExtrinsicCalibrationCalculateResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_calculate" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationCalculateResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + result_identifier: str = Field() + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationDeleteStagedRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_delete_staged" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationDeleteStagedRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + +class ExtrinsicCalibrationImageAddRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_add" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageAddRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + image_base64: str = Field() + detector_label: str = Field() + timestamp_utc_iso8601: str = Field() + + +class ExtrinsicCalibrationImageAddResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_add" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageAddResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + image_identifier: str = Field() + + +class ExtrinsicCalibrationImageGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageGetRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + image_identifier: str = Field() + + +class ExtrinsicCalibrationImageGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageGetResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + image_base64: str = Field() + + +class ExtrinsicCalibrationImageMetadataListRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_list" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageMetadataListRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + +class 
ExtrinsicCalibrationImageMetadataListResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_list" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageMetadataListResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + metadata_list: list[ExtrinsicCalibrator.ImageMetadata] = Field(default_factory=list) + + +class ExtrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_update" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + image_identifier: str = Field() + image_state: ExtrinsicCalibrator.ImageState = Field() + image_label: str | None = Field(default=None) + + +class ExtrinsicCalibrationResultGetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultGetRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + result_identifier: str = Field() + + +class ExtrinsicCalibrationResultGetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultGetResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationResultGetActiveRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_active_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultGetActiveRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + +class ExtrinsicCalibrationResultGetActiveResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_active_get" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultGetActiveResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + extrinsic_calibration: ExtrinsicCalibration = Field() + + +class ExtrinsicCalibrationResultMetadataListRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_list" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultMetadataListRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + +class ExtrinsicCalibrationResultMetadataListResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_list" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultMetadataListResponse._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + metadata_list: list[ExtrinsicCalibrator.ResultMetadata] = Field(default_factory=list) + + +class ExtrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_update" + + @staticmethod + def type_identifier() -> str: + return ExtrinsicCalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + result_identifier: str = Field() + result_state: ExtrinsicCalibrator.ResultState = Field() + result_label: str | None = Field(default=None) + + 
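Every request/response model added in the new src/mixer/api.py follows the same convention as the detector API: a class-level _TYPE_IDENTIFIER constant, exposed through type_identifier(), and echoed by a parsable_type field that defaults to it, so payloads remain distinguishable after a JSON round trip. The standalone sketch below illustrates only that discriminator pattern with plain pydantic; WireMessage, PingRequest, PongResponse and parse_message are hypothetical stand-ins rather than the project's MCTRequest/MCTResponse classes, and the identifier is shown as a ClassVar for brevity.

# Standalone illustration of the parsable_type discriminator pattern used by the
# request/response models in this patch. All names here are illustrative only.
import json
from typing import ClassVar

from pydantic import BaseModel, Field


class WireMessage(BaseModel):
    # Common discriminator carried by every message.
    parsable_type: str


class PingRequest(WireMessage):
    TYPE_IDENTIFIER: ClassVar[str] = "ping"
    parsable_type: str = Field(default=TYPE_IDENTIFIER)
    payload: str = Field(default="")


class PongResponse(WireMessage):
    TYPE_IDENTIFIER: ClassVar[str] = "pong"
    parsable_type: str = Field(default=TYPE_IDENTIFIER)
    payload: str = Field(default="")


# A receiver rebuilds the concrete type from the discriminator value.
MESSAGE_TYPES: dict[str, type[WireMessage]] = {
    cls.TYPE_IDENTIFIER: cls for cls in (PingRequest, PongResponse)
}


def parse_message(raw_json: str) -> WireMessage:
    data = json.loads(raw_json)
    return MESSAGE_TYPES[data["parsable_type"]].model_validate(data)


if __name__ == "__main__":
    encoded = PingRequest(payload="hello").model_dump_json()
    decoded = parse_message(encoded)
    assert isinstance(decoded, PingRequest) and decoded.payload == "hello"

The same idea scales to the full set of mixer and detector messages: one registry keyed by type_identifier(), one validation call per incoming payload.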
+class PoseSolverAddDetectorFrameRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_marker_corners" + + @staticmethod + def type_identifier() -> str: + return PoseSolverAddDetectorFrameRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + detector_label: str = Field() + detector_frame: DetectorFrame = Field() + + +class PoseSolverAddTargetRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_target" + + @staticmethod + def type_identifier() -> str: + return PoseSolverAddTargetRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + target: Target = Field() + + +class PoseSolverAddTargetResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_target" + + @staticmethod + def type_identifier() -> str: + return PoseSolverAddTargetResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + target_id: str = Field() + + +class PoseSolverGetPosesRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_get_poses" + + @staticmethod + def type_identifier() -> str: + return PoseSolverGetPosesRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class PoseSolverGetPosesResponse(MCTResponse): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_get_poses" + + @staticmethod + def type_identifier() -> str: + return PoseSolverGetPosesResponse._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + detector_poses: list[Pose] + target_poses: list[Pose] + + +class PoseSolverSetExtrinsicRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_extrinsic_parameters" + + @staticmethod + def type_identifier() -> str: + return PoseSolverSetExtrinsicRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + detector_label: str = Field() + transform_to_reference: Matrix4x4 = Field() + + +class PoseSolverSetReferenceRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_reference_marker" + + @staticmethod + def type_identifier() -> str: + return PoseSolverSetReferenceRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + marker_id: int = Field() + marker_diameter: float = Field() + + +class PoseSolverSetTargetsRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_targets" + + @staticmethod + def type_identifier() -> str: + return PoseSolverSetTargetsRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + targets: list[Target] = Field() + + +class MixerStartRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_start" + + @staticmethod + def type_identifier() -> str: + return MixerStartRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class MixerStopRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_stop" + + @staticmethod + def type_identifier() -> str: + return MixerStopRequest._TYPE_IDENTIFIER + + # noinspection PyTypeHints + 
parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + + +class MixerUpdateIntrinsicParametersRequest(MCTRequest): + _TYPE_IDENTIFIER: Final[str] = "mixer_update_intrinsic_parameters" + + @staticmethod + def type_identifier() -> str: + return MixerUpdateIntrinsicParametersRequest._TYPE_IDENTIFIER + + parsable_type: str = Field(default=_TYPE_IDENTIFIER) + + detector_label: str = Field() + intrinsic_parameters: IntrinsicParameters = Field() diff --git a/src/pose_solver/pose_solver_app.py b/src/mixer/app.py similarity index 75% rename from src/pose_solver/pose_solver_app.py rename to src/mixer/app.py index 36d0d9d..7cb8136 100644 --- a/src/pose_solver/pose_solver_app.py +++ b/src/mixer/app.py @@ -2,16 +2,13 @@ PoseSolverAddDetectorFrameRequest, \ PoseSolverAddTargetRequest, \ PoseSolverGetPosesResponse, \ - PoseSolverSetIntrinsicRequest -from .pose_solver import \ - PoseSolver -from .pose_solver_api import \ - PoseSolverAPI -from .structures import \ - PoseSolverConfiguration + MixerUpdateIntrinsicParametersRequest +from .mixer import \ + Mixer from src.common import \ EmptyResponse, \ - ErrorResponse + ErrorResponse, \ + PoseSolver import asyncio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware @@ -28,12 +25,12 @@ def create_app() -> FastAPI: configuration_filepath: str = os.path.join( os.path.dirname(__file__), "..", "..", "data", "pose_solver_config.json") - configuration: PoseSolverConfiguration + configuration: Mixer.Configuration with open(configuration_filepath, 'r') as infile: file_contents: str = infile.read() configuration_dict = hjson.loads(file_contents) - configuration = PoseSolverConfiguration(**configuration_dict) - pose_solver_api = PoseSolverAPI( + configuration = Mixer.Configuration(**configuration_dict) + pose_solver_api = Mixer( configuration=configuration, pose_solver=pose_solver) pose_solver_app = FastAPI() @@ -51,31 +48,31 @@ def create_app() -> FastAPI: async def add_marker_corners( request: PoseSolverAddDetectorFrameRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.add_detector_frame(request=request) + return pose_solver_api.pose_solver_add_detector_frame(request=request) @pose_solver_app.post("/add_target") async def add_target_marker( request: PoseSolverAddTargetRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.add_target(request=request) + return pose_solver_api.pose_solver_add_target(request=request) @pose_solver_app.get("/get_poses") async def get_poses() -> PoseSolverGetPosesResponse | ErrorResponse: - return pose_solver_api.get_poses() + return pose_solver_api.pose_solver_get_poses() @pose_solver_app.post("/set_intrinsic_parameters") async def set_intrinsic_parameters( - request: PoseSolverSetIntrinsicRequest + request: MixerUpdateIntrinsicParametersRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.set_intrinsic_parameters(request=request) + return pose_solver_api.mixer_update_intrinsic_parameters(request=request) @pose_solver_app.head("/start_capture") async def start_capture() -> None: - pose_solver_api.start_pose_solver() + pose_solver_api.mixer_start() @pose_solver_app.head("/stop_capture") async def stop_capture() -> None: - pose_solver_api.stop_pose_solver() + pose_solver_api.mixer_stop() @pose_solver_app.websocket("/websocket") async def websocket_handler(websocket: WebSocket) -> None: diff --git a/src/mixer/mixer.py b/src/mixer/mixer.py new file mode 100644 index 0000000..ae44f4e --- /dev/null +++ b/src/mixer/mixer.py @@ -0,0 +1,362 @@ +from .api 
import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetActiveRequest, \ + ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ + PoseSolverAddDetectorFrameRequest, \ + PoseSolverAddTargetRequest, \ + PoseSolverGetPosesRequest, \ + PoseSolverGetPosesResponse, \ + PoseSolverSetExtrinsicRequest, \ + MixerUpdateIntrinsicParametersRequest, \ + PoseSolverSetReferenceRequest, \ + PoseSolverSetTargetsRequest, \ + MixerStartRequest, \ + MixerStopRequest +from src.common import \ + EmptyResponse, \ + ErrorResponse, \ + ExtrinsicCalibration, \ + ExtrinsicCalibrator, \ + MCTCalibrationError, \ + MCTComponent, \ + MCTRequest, \ + MCTResponse, \ + Pose, \ + PoseSolver, \ + PoseSolverException +from enum import StrEnum +import logging +from pydantic import BaseModel, Field +from typing import Callable, Final + + +logger = logging.getLogger(__name__) + + +_ROLE_LABEL: Final[str] = "mixer" + + +# noinspection DuplicatedCode +class Mixer(MCTComponent): + + class Configuration(BaseModel): + serial_identifier: str = Field() + + class Status(StrEnum): + STOPPED = "stopped" + RUNNING = "running" + FAILURE = "failure" + + _status: Status + + _configuration: Configuration + _pose_solver: PoseSolver + _extrinsic_calibrator: ExtrinsicCalibrator + + def __init__( + self, + configuration: Configuration, + pose_solver: PoseSolver, + extrinsic_calibrator: ExtrinsicCalibrator + ): + super().__init__( + status_source_label=configuration.serial_identifier, + send_status_messages_to_logger=True) + + self._configuration = configuration + self._pose_solver = pose_solver + self._extrinsic_calibrator = extrinsic_calibrator + + self._status = Mixer.Status.STOPPED + + def extrinsic_calibrator_calculate( + self, + **_kwargs + ) -> ExtrinsicCalibrationCalculateResponse | ErrorResponse: + result_identifier: str + extrinsic_calibration: ExtrinsicCalibration + try: + result_identifier, extrinsic_calibration = self._extrinsic_calibrator.calculate() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationCalculateResponse( + result_identifier=result_identifier, + extrinsic_calibration=extrinsic_calibration) + + def extrinsic_calibrator_delete_staged( + self, + **_kwargs + ) -> EmptyResponse | ErrorResponse: + try: + self._extrinsic_calibrator.delete_staged() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + def extrinsic_calibrator_image_add( + self, + **kwargs + ) -> ExtrinsicCalibrationImageAddResponse | ErrorResponse: + request: ExtrinsicCalibrationImageAddRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageAddRequest) + image_identifier: str + try: + image_identifier = self._extrinsic_calibrator.add_image( + image_base64=request.image_base64, + 
detector_label=request.detector_label, + timestamp_utc_iso8601=request.timestamp_utc_iso8601) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageAddResponse(image_identifier=image_identifier) + + def extrinsic_calibrator_image_get( + self, + **kwargs + ) -> ExtrinsicCalibrationImageGetResponse | ErrorResponse: + request: ExtrinsicCalibrationImageGetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageGetRequest) + image_base64: str + try: + image_base64 = self._extrinsic_calibrator.get_image_by_identifier(identifier=request.image_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageGetResponse(image_base64=image_base64) + + def extrinsic_calibrator_image_metadata_list( + self, + **_kwargs + ) -> ExtrinsicCalibrationImageMetadataListResponse | ErrorResponse: + image_metadata_list: list[ExtrinsicCalibrator.ImageMetadata] + try: + image_metadata_list = self._extrinsic_calibrator.list_image_metadata() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationImageMetadataListResponse(metadata_list=image_metadata_list) + + def extrinsic_calibrator_image_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: ExtrinsicCalibrationImageMetadataUpdateRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationImageMetadataUpdateRequest) + try: + self._extrinsic_calibrator.update_image_metadata( + image_identifier=request.image_identifier, + image_state=request.image_state, + image_label=request.image_label) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + def extrinsic_calibrator_result_get_active( + self, + **_kwargs + ) -> ExtrinsicCalibrationResultGetActiveResponse | ErrorResponse: + calibration: ExtrinsicCalibration + try: + calibration = self._extrinsic_calibrator.get_result_active() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultGetActiveResponse(extrinsic_calibration=calibration) + + def extrinsic_calibrator_result_get( + self, + **kwargs + ) -> ExtrinsicCalibrationResultGetResponse | ErrorResponse: + request: ExtrinsicCalibrationResultGetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationResultGetRequest) + calibration: ExtrinsicCalibration + try: + calibration = self._extrinsic_calibrator.get_result(result_identifier=request.result_identifier) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultGetResponse(extrinsic_calibration=calibration) + + def extrinsic_calibrator_result_metadata_list( + self, + **_kwargs + ) -> ExtrinsicCalibrationResultMetadataListResponse | ErrorResponse: + result_metadata_list: list[ExtrinsicCalibrator.ResultMetadata] + try: + result_metadata_list = self._extrinsic_calibrator.list_result_metadata() + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return ExtrinsicCalibrationResultMetadataListResponse(metadata_list=result_metadata_list) + + def 
extrinsic_calibrator_result_metadata_update( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: ExtrinsicCalibrationResultMetadataUpdateRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=ExtrinsicCalibrationResultMetadataUpdateRequest) + try: + self._extrinsic_calibrator.update_result_metadata( + result_identifier=request.result_identifier, + result_state=request.result_state, + result_label=request.result_label) + except MCTCalibrationError as e: + logger.error(e.private_message) + return ErrorResponse(message=e.public_message) + return EmptyResponse() + + @staticmethod + def get_role_label(): + return _ROLE_LABEL + + def mixer_start(self, **_kwargs) -> EmptyResponse: + self._status = Mixer.Status.RUNNING + return EmptyResponse() + + def mixer_stop(self, **_kwargs) -> EmptyResponse: + self._status = Mixer.Status.STOPPED + return EmptyResponse() + + def mixer_update_intrinsic_parameters( + self, + **kwargs + ) -> EmptyResponse | ErrorResponse: + request: MixerUpdateIntrinsicParametersRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=MixerUpdateIntrinsicParametersRequest) + self._pose_solver.set_intrinsic_parameters( + detector_label=request.detector_label, + intrinsic_parameters=request.intrinsic_parameters) + self._extrinsic_calibrator.intrinsic_parameters_update( + detector_label=request.detector_label, + intrinsic_parameters=request.intrinsic_parameters) + return EmptyResponse() + + def pose_solver_add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverAddDetectorFrameRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverAddDetectorFrameRequest) + try: + self._pose_solver.add_detector_frame( + detector_label=request.detector_label, + frame_annotations=request.detector_frame.annotations, + frame_timestamp_utc=request.detector_frame.timestamp_utc) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverAddTargetRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverAddTargetRequest) + try: + self._pose_solver.add_target(target=request.target) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: + detector_poses: list[Pose] + target_poses: list[Pose] + try: + detector_poses, target_poses = self._pose_solver.get_poses() + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return PoseSolverGetPosesResponse( + detector_poses=detector_poses, + target_poses=target_poses) + + def pose_solver_set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetExtrinsicRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetExtrinsicRequest) + try: + self._pose_solver.set_extrinsic_matrix( + detector_label=request.detector_label, + transform_to_reference=request.transform_to_reference) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetReferenceRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetReferenceRequest) + try: + self._pose_solver.set_reference_target(target_id=str(request.marker_id)) + 
except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def pose_solver_set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: + request: PoseSolverSetTargetsRequest = self.get_kwarg( + kwargs=kwargs, + key="request", + arg_type=PoseSolverSetTargetsRequest) + try: + self._pose_solver.set_targets(targets=request.targets) + except PoseSolverException as e: + return ErrorResponse(message=e.message) + return EmptyResponse() + + def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() + return_value.update({ + ExtrinsicCalibrationCalculateRequest: self.extrinsic_calibrator_calculate, + ExtrinsicCalibrationDeleteStagedRequest: self.extrinsic_calibrator_delete_staged, + ExtrinsicCalibrationImageAddRequest: self.extrinsic_calibrator_image_add, + ExtrinsicCalibrationImageGetRequest: self.extrinsic_calibrator_image_get, + ExtrinsicCalibrationImageMetadataListRequest: self.extrinsic_calibrator_image_metadata_list, + ExtrinsicCalibrationImageMetadataUpdateRequest: self.extrinsic_calibrator_image_metadata_update, + ExtrinsicCalibrationResultGetActiveRequest: self.extrinsic_calibrator_result_get_active, + ExtrinsicCalibrationResultGetRequest: self.extrinsic_calibrator_result_get, + ExtrinsicCalibrationResultMetadataListRequest: self.extrinsic_calibrator_result_metadata_list, + ExtrinsicCalibrationResultMetadataUpdateRequest: self.extrinsic_calibrator_result_metadata_update, + MixerStartRequest: self.mixer_start, + MixerStopRequest: self.mixer_stop, + MixerUpdateIntrinsicParametersRequest: self.mixer_update_intrinsic_parameters, + PoseSolverAddDetectorFrameRequest: self.pose_solver_add_detector_frame, + PoseSolverAddTargetRequest: self.pose_solver_add_target, + PoseSolverGetPosesRequest: self.pose_solver_get_poses, + PoseSolverSetExtrinsicRequest: self.pose_solver_set_extrinsic_matrix, + PoseSolverSetReferenceRequest: self.pose_solver_set_reference_marker, + PoseSolverSetTargetsRequest: self.pose_solver_set_targets}) + return return_value + + async def update(self): + if self.time_sync_active: + return + if self._status == Mixer.Status.RUNNING: + self._pose_solver.update() diff --git a/src/pose_solver/api.py b/src/pose_solver/api.py deleted file mode 100644 index e9a4f9a..0000000 --- a/src/pose_solver/api.py +++ /dev/null @@ -1,152 +0,0 @@ -from src.common import \ - DetectorFrame, \ - IntrinsicParameters, \ - Matrix4x4, \ - MCTRequest, \ - MCTResponse, \ - Pose, \ - Target -from pydantic import Field -from typing import Final, Literal - - -class PoseSolverAddDetectorFrameRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_marker_corners" - - @staticmethod - def type_identifier() -> str: - return PoseSolverAddDetectorFrameRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - detector_frame: DetectorFrame = Field() - - -class PoseSolverAddTargetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "add_target" - - @staticmethod - def type_identifier() -> str: - return PoseSolverAddTargetRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target: Target = Field() - - -class PoseSolverAddTargetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "add_target" - - @staticmethod - def type_identifier() -> str: - 
return PoseSolverAddTargetResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - target_id: str = Field() - - -class PoseSolverGetPosesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "get_poses" - - @staticmethod - def type_identifier() -> str: - return PoseSolverGetPosesRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class PoseSolverGetPosesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "get_poses" - - @staticmethod - def type_identifier() -> str: - return PoseSolverGetPosesResponse._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_poses: list[Pose] - target_poses: list[Pose] - - -class PoseSolverSetExtrinsicRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_extrinsic_parameters" - - @staticmethod - def type_identifier() -> str: - return PoseSolverSetExtrinsicRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - transform_to_reference: Matrix4x4 = Field() - - -class PoseSolverSetIntrinsicRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_intrinsic_parameters" - - @staticmethod - def type_identifier() -> str: - return PoseSolverSetIntrinsicRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - detector_label: str = Field() - intrinsic_parameters: IntrinsicParameters = Field() - - -class PoseSolverSetReferenceRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_reference_marker" - - @staticmethod - def type_identifier() -> str: - return PoseSolverSetReferenceRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - marker_id: int = Field() - marker_diameter: float = Field() - - -class PoseSolverSetTargetsRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "set_targets" - - @staticmethod - def type_identifier() -> str: - return PoseSolverSetTargetsRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - targets: list[Target] = Field() - - -class PoseSolverStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "start_pose_solver" - - @staticmethod - def type_identifier() -> str: - return PoseSolverStartRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) - - -class PoseSolverStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "stop_pose_solver" - - @staticmethod - def type_identifier() -> str: - return PoseSolverStopRequest._TYPE_IDENTIFIER - - # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) diff --git a/src/pose_solver/pose_solver_api.py b/src/pose_solver/pose_solver_api.py deleted file mode 100644 index 1f258e0..0000000 --- a/src/pose_solver/pose_solver_api.py +++ /dev/null @@ -1,181 +0,0 @@ -from .api import \ - PoseSolverAddDetectorFrameRequest, \ - PoseSolverAddTargetRequest, \ - PoseSolverGetPosesRequest, \ - PoseSolverGetPosesResponse, \ - PoseSolverSetExtrinsicRequest, \ - PoseSolverSetIntrinsicRequest, \ - PoseSolverSetReferenceRequest, \ - PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, 
\ - PoseSolverStopRequest -from .pose_solver import \ - PoseSolver, \ - PoseSolverException -from .structures import \ - PoseSolverConfiguration -from src.common import \ - EmptyResponse, \ - ErrorResponse, \ - MCTComponent, \ - MCTRequest, \ - MCTResponse, \ - Pose -from enum import StrEnum -import logging -from typing import Callable, Final - - -logger = logging.getLogger(__name__) - - -_ROLE_LABEL: Final[str] = "pose_solver" - - -class PoseSolverAPI(MCTComponent): - """ - API-friendly layer overtop of a PoseSolver - """ - - class Status: - - class Solve(StrEnum): - STOPPED = "stopped" - RUNNING = "running" - FAILURE = "failure" - - solve_status: Solve - solve_errors: list[str] - - def __init__(self): - self.solve_status = PoseSolverAPI.Status.Solve.STOPPED - self.solve_errors = list() - - _status: Status - _pose_solver: PoseSolver - - def __init__( - self, - configuration: PoseSolverConfiguration, - pose_solver: PoseSolver - ): - super().__init__( - status_source_label=configuration.serial_identifier, - send_status_messages_to_logger=True) - self._pose_solver = pose_solver - self._status = PoseSolverAPI.Status() - - def add_detector_frame(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddDetectorFrameRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverAddDetectorFrameRequest) - try: - self._pose_solver.add_detector_frame( - detector_label=request.detector_label, - detector_frame=request.detector_frame) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def add_target(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverAddTargetRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverAddTargetRequest) - try: - self._pose_solver.add_target(target=request.target) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def get_poses(self, **_kwargs) -> PoseSolverGetPosesResponse | ErrorResponse: - detector_poses: list[Pose] - target_poses: list[Pose] - try: - detector_poses, target_poses = self._pose_solver.get_poses() - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return PoseSolverGetPosesResponse( - detector_poses=detector_poses, - target_poses=target_poses) - - @staticmethod - def get_role_label(): - return _ROLE_LABEL - - def set_extrinsic_matrix(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetExtrinsicRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetExtrinsicRequest) - try: - self._pose_solver.set_extrinsic_matrix( - detector_label=request.detector_label, - transform_to_reference=request.transform_to_reference) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_intrinsic_parameters(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetIntrinsicRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetIntrinsicRequest) - try: - self._pose_solver.set_intrinsic_parameters( - detector_label=request.detector_label, - intrinsic_parameters=request.intrinsic_parameters) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_reference_marker(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetReferenceRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetReferenceRequest) - try: - 
self._pose_solver.set_reference_target(target_id=str(request.marker_id)) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: - request: PoseSolverSetTargetsRequest = self.get_kwarg( - kwargs=kwargs, - key="request", - arg_type=PoseSolverSetTargetsRequest) - try: - self._pose_solver.set_targets(targets=request.targets) - except PoseSolverException as e: - return ErrorResponse(message=e.message) - return EmptyResponse() - - def start_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverAPI.Status.Solve.RUNNING - return EmptyResponse() - - def stop_pose_solver(self, **_kwargs) -> EmptyResponse: - self._status.solve_status = PoseSolverAPI.Status.Solve.STOPPED - return EmptyResponse() - - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() - return_value.update({ - PoseSolverAddDetectorFrameRequest: self.add_detector_frame, - PoseSolverAddTargetRequest: self.add_target, - PoseSolverGetPosesRequest: self.get_poses, - PoseSolverSetExtrinsicRequest: self.set_extrinsic_matrix, - PoseSolverSetIntrinsicRequest: self.set_intrinsic_parameters, - PoseSolverSetReferenceRequest: self.set_reference_marker, - PoseSolverSetTargetsRequest: self.set_targets, - PoseSolverStartRequest: self.start_pose_solver, - PoseSolverStopRequest: self.stop_pose_solver}) - return return_value - - async def update(self): - if self.time_sync_active: - return - if self._status.solve_status == PoseSolverAPI.Status.Solve.RUNNING: - self._pose_solver.update() diff --git a/src/pose_solver/structures.py b/src/pose_solver/structures.py deleted file mode 100644 index c4e1f1f..0000000 --- a/src/pose_solver/structures.py +++ /dev/null @@ -1,114 +0,0 @@ -from src.common import \ - Annotation, \ - DetectorFrame -import cv2.aruco -import datetime -from pydantic import BaseModel, Field - - -class DetectorRecord: - """ - Class whose purpose is to keep track of the latest position of each landmark (in annotation form) - for a single detector. 
- """ - - class TimestampedAnnotation: - annotation: Annotation - timestamp_utc: datetime.datetime - def __init__( - self, - annotation: Annotation, - timestamp_utc: datetime.datetime - ): - self.annotation = annotation - self.timestamp_utc = timestamp_utc - - _timestamped_annotations: dict[str, TimestampedAnnotation] - - def __init__(self): - self._timestamped_annotations = dict() - - def add_frame_record( - self, - frame: DetectorFrame - ) -> None: - for annotation in frame.annotations: - if annotation.feature_label not in self._timestamped_annotations: - self._timestamped_annotations[annotation.feature_label] = DetectorRecord.TimestampedAnnotation( - annotation=annotation, - timestamp_utc=frame.timestamp_utc) - continue - timestamped_annotation: DetectorRecord.TimestampedAnnotation = \ - self._timestamped_annotations[annotation.feature_label] - if frame.timestamp_utc > timestamped_annotation.timestamp_utc: - self._timestamped_annotations[annotation.feature_label] = DetectorRecord.TimestampedAnnotation( - annotation=annotation, - timestamp_utc=frame.timestamp_utc) - - def clear_frame_records(self): - self._timestamped_annotations.clear() - - def clear_frame_records_older_than( - self, - timestamp_utc: datetime.datetime - ) -> bool: - """ - returns True if any changes were made - """ - feature_labels_to_remove: list[str] = list() - for entry in self._timestamped_annotations.values(): - if entry.timestamp_utc < timestamp_utc: - feature_labels_to_remove.append(entry.annotation.feature_label) - if len(feature_labels_to_remove) <= 0: - return False - for feature_label in feature_labels_to_remove: - del self._timestamped_annotations[feature_label] - return True - - def get_annotations( - self, - deep_copy: bool = True - ) -> list[Annotation]: - if deep_copy: - return [entry.annotation.model_copy() for entry in self._timestamped_annotations.values()] - return [entry.annotation for entry in self._timestamped_annotations.values()] - - -class PoseSolverConfiguration(BaseModel): - serial_identifier: str = Field() - - -class PoseSolverParameters(BaseModel): - minimum_detector_count: int = Field(default=2) - MAXIMUM_RAY_COUNT_FOR_INTERSECTION: int = Field(default=2) - POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: float = Field(default=0.1) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MINIMUM_SURFACE_NORMAL_ANGLE_DEGREES: float = Field(default=15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS: float = Field(default=1.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_MAXIMUM_ORDER: int = Field(default=0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_ANGLE_DEGREES: float = Field(default=15.0) - POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_DISTANCE: float = Field(default=15.0, description="millimeters") - POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS: float = Field(default=0.8) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_ANGLE_DEGREES: float = Field(default=15.0) - POSE_SINGLE_CAMERA_NEAREST_LIMIT_DISTANCE: float = Field(default=15.0) - POSE_SINGLE_CAMERA_REPROJECTION_ERROR_FACTOR_BETA_OVER_ALPHA: float = Field(default=1.0) - POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS: float = Field(default=0.4) - # TODO: Is this next one detector-specific? 
- POSE_SINGLE_CAMERA_DEPTH_CORRECTION: float = Field(default=-7.5, description="millimeters, observed tendency to overestimate depth.") - POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS: float = Field(default=1.0) - INTERSECTION_MAXIMUM_DISTANCE: float = Field(default=10.0, description="millimeters") - icp_termination_iteration_count: int = Field(default=50) - icp_termination_translation: float = Field(default=0.005, description="millimeters") - icp_termination_rotation_radians: float = Field(default=0.0005) - icp_termination_mean_point_distance: float = Field(default=0.1, description="millimeters") - icp_termination_rms_point_distance: float = Field(default=0.1, description="millimeters") - DENOISE_OUTLIER_DISTANCE_MILLIMETERS: float = Field(default=10.0) - DENOISE_OUTLIER_ANGLE_DEGREES: float = Field(default=5.0) - DENOISE_STORAGE_SIZE: int = Field(default=10) - DENOISE_FILTER_SIZE: int = Field(default=7) - DENOISE_REQUIRED_STARTING_STREAK: int = Field(default=3) - ARUCO_MARKER_DICTIONARY_ENUM: int = Field(default=cv2.aruco.DICT_4X4_100) - ARUCO_POSE_ESTIMATOR_METHOD: int = Field(default=cv2.SOLVEPNP_ITERATIVE) - # SOLVEPNP_ITERATIVE works okay but is susceptible to optical illusions (flipping) - # SOLVEPNP_P3P appears to return nan's on rare occasion - # SOLVEPNP_SQPNP appears to return nan's on rare occasion - # SOLVEPNP_IPPE_SQUARE does not seem to work very well at all, translation is much smaller than expected diff --git a/test/test_extrinsic_calibration.py b/test/test_extrinsic_calibration.py index db51f60..7fa38b7 100644 --- a/test/test_extrinsic_calibration.py +++ b/test/test_extrinsic_calibration.py @@ -99,7 +99,11 @@ def test(self): image_base64=image_base64, detector_label=camera_id, timestamp_utc_iso8601=timestamps_iso8601_by_frame[frame_id]) - _, extrinsic_calibration = extrinsic_calibrator.calculate(detector_intrinsics_by_label=intrinsics_by_camera) + for camera_id, intrinsic_parameters in intrinsics_by_camera.items(): + extrinsic_calibrator.intrinsic_parameters_update( + detector_label=camera_id, + intrinsic_parameters=intrinsic_parameters) + _, extrinsic_calibration = extrinsic_calibrator.calculate() # label, translation, rotation (as quaternion) ground_truth_detector_poses: dict[str, tuple[list[float], list[float]]] = { diff --git a/test/test_pose_solver.py b/test/test_pose_solver.py index 0d018c0..81c5c2c 100644 --- a/test/test_pose_solver.py +++ b/test/test_pose_solver.py @@ -1,11 +1,9 @@ -from src.pose_solver.pose_solver import PoseSolver from src.common import \ Annotation, \ - DetectorFrame, \ - ImageResolution, \ IntrinsicParameters, \ Matrix4x4, \ Pose, \ + PoseSolver, \ Target from src.implementations.common_aruco_opencv import ArucoOpenCVCommon import datetime @@ -13,7 +11,6 @@ import unittest -IMAGE_RESOLUTION: Final[ImageResolution] = ImageResolution(x_px=640, y_px=480) REL_CHAR: Final[str] = Annotation.RELATION_CHARACTER # For brevity MARKER_SIZE_MM: Final[float] = 10.0 REFERENCE_TARGET_ID: Final[str] = "reference" @@ -102,7 +99,7 @@ def test_single_camera_viewing_target_marker(self): now_utc = datetime.datetime.now(datetime.timezone.utc) pose_solver: PoseSolver = PoseSolver() # TODO: The following line shall be replaced upon implementation of an appropriate mechanism - pose_solver._parameters.minimum_detector_count = 1 + pose_solver._configuration.minimum_detector_count = 1 pose_solver.set_intrinsic_parameters( detector_label=DETECTOR_RED_NAME, intrinsic_parameters=DETECTOR_RED_INTRINSICS) @@ -112,18 +109,16 @@ def test_single_camera_viewing_target_marker(self): # 
Reference is on the left, target is on the right, both in the same plane and along the x-axis of the image. pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, - detector_frame=DetectorFrame( - annotations=[ - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=375, y_px=347), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=415, y_px=346), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=416, y_px=386), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=376, y_px=386), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=541, y_px=347), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=581, y_px=348), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=580, y_px=388), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=540, y_px=387)], - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=375, y_px=347), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=415, y_px=346), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=416, y_px=386), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=376, y_px=386), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=541, y_px=347), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=581, y_px=348), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=580, y_px=388), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=540, y_px=387)], + frame_timestamp_utc=now_utc) pose_solver.update() detector_poses: list[Pose] target_poses: list[Pose] @@ -176,60 +171,52 @@ def test_four_cameras_viewing_target_marker(self): pose_solver.set_reference_target(target_id=REFERENCE_MARKER_TARGET.label) pose_solver.add_detector_frame( detector_label=DETECTOR_RED_NAME, - detector_frame=DetectorFrame( - annotations=[ - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=157, y_px=210), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=165, y_px=221), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=139, y_px=229), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=131, y_px=217), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=196, y_px=266), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=206, y_px=281), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=178, y_px=291), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=167, y_px=275)], - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=157, y_px=210), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=165, y_px=221), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=139, y_px=229), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=131, y_px=217), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=196, y_px=266), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=206, y_px=281), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=178, y_px=291), + 
Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=167, y_px=275)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_SKY_NAME, - detector_frame=DetectorFrame( - annotations=[ - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=190, y_px=234), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=219, y_px=246), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=195, y_px=270), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=166, y_px=257), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=317, y_px=290), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=352, y_px=306), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=332, y_px=333), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=296, y_px=317)], - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=190, y_px=234), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=219, y_px=246), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=195, y_px=270), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=166, y_px=257), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=317, y_px=290), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=352, y_px=306), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=332, y_px=333), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=296, y_px=317)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_GREEN_NAME, - detector_frame=DetectorFrame( - annotations=[ - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=247, y_px=304), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=283, y_px=296), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=291, y_px=326), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=254, y_px=334), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=392, y_px=277), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=426, y_px=271), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=438, y_px=299), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=403, y_px=305)], - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=247, y_px=304), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=283, y_px=296), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=291, y_px=326), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=254, y_px=334), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=392, y_px=277), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=426, y_px=271), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=438, y_px=299), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=403, y_px=305)], + frame_timestamp_utc=now_utc) pose_solver.add_detector_frame( detector_label=DETECTOR_YELLOW_NAME, - 
detector_frame=DetectorFrame( - annotations=[ - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=275, y_px=277), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=289, y_px=251), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=321, y_px=261), - Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=306, y_px=288), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=332, y_px=177), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=344, y_px=156), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=372, y_px=163), - Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=361, y_px=185)], - image_resolution=IMAGE_RESOLUTION, - timestamp_utc_iso8601=now_utc.isoformat())) + frame_annotations=[ + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}0", x_px=275, y_px=277), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}1", x_px=289, y_px=251), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}2", x_px=321, y_px=261), + Annotation(feature_label=f"{str(REFERENCE_MARKER_ID)}{REL_CHAR}3", x_px=306, y_px=288), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}0", x_px=332, y_px=177), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}1", x_px=344, y_px=156), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}2", x_px=372, y_px=163), + Annotation(feature_label=f"{str(TARGET_MARKER_ID)}{REL_CHAR}3", x_px=361, y_px=185)], + frame_timestamp_utc=now_utc) pose_solver.update() detector_poses: list[Pose] target_poses: list[Pose] From 23781a4789a09699dbd2198ddb80efaeef364219 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Mon, 25 Aug 2025 16:32:11 -0400 Subject: [PATCH 22/33] BUG: Update/correct imports, remove unused code --- src/board_builder/board_builder.py | 56 +- src/board_builder/{utils => }/graph_search.py | 0 src/board_builder/structures.py | 89 +- src/board_builder/test/graph_search_test.py | 2 +- src/board_builder/utils/__init__.py | 1 - .../utils/board_builder_pose_solver.py | 771 ------------------ src/controller/connection.py | 12 +- src/controller/mct_controller.py | 26 +- 8 files changed, 45 insertions(+), 912 deletions(-) rename src/board_builder/{utils => }/graph_search.py (100%) delete mode 100644 src/board_builder/utils/__init__.py delete mode 100644 src/board_builder/utils/board_builder_pose_solver.py diff --git a/src/board_builder/board_builder.py b/src/board_builder/board_builder.py index 11247d5..dc4b56c 100644 --- a/src/board_builder/board_builder.py +++ b/src/board_builder/board_builder.py @@ -1,6 +1,6 @@ -from .utils import BoardBuilderPoseSolver -from .structures import PoseLocation, Marker, MarkerCorners, TargetBoard -from src.common import Pose, Annotation, Matrix4x4 +from .structures import PoseLocation, Marker, TargetBoard +from src.common import Annotation, Matrix4x4, Pose, PoseSolver +from src.implementations.common_aruco_opencv import ArucoOpenCVCommon from collections import defaultdict import datetime import json @@ -11,6 +11,7 @@ _HOMOGENEOUS_POINT_COORD: Final[int] = 4 TESTED_BOARD_NAME: str = 'top_data.json' # If collecting data for repeatability test, specify the file name. 
cube_data.json, planar_data.json, top_data.json +MARKER_SIZE_MM: Final[float] = 10.0 class BoardBuilder: @@ -38,7 +39,7 @@ def __init__(self, marker_size): self.marker_size = marker_size # in mm self._index_to_marker_uuid = dict() self._index_to_marker_id = dict() - self.pose_solver = BoardBuilderPoseSolver() + self.pose_solver = PoseSolver() # matrix init self._matrix_id_index = 0 @@ -149,7 +150,9 @@ def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: dat for detector_name in detector_data: for marker_snapshot in detector_data[detector_name]: if marker_snapshot.feature_label not in list(self._index_to_marker_id.values()): - self.pose_solver.add_target_marker(int(marker_snapshot.feature_label)) + self.pose_solver.add_target(ArucoOpenCVCommon.target_from_marker_parameters( + base_label=str(int(marker_snapshot.feature_label)), + marker_size=MARKER_SIZE_MM)) self._expand_relative_pose_matrix() self._index_to_marker_id[self._matrix_id_index] = marker_snapshot.feature_label self._matrix_id_index += 1 @@ -157,19 +160,12 @@ def _solve_pose(self, detector_data: dict[str, list[Annotation]], timestamp: dat for detector_name in detector_data: # assumes 4 corners per marker for i in range(0, len(detector_data[detector_name]), 4): - corners_list: list[list[float]] = [ # Indexed as [point][coordinate] - [detector_data[detector_name][i].x_px, detector_data[detector_name][i].y_px], - [detector_data[detector_name][i+1].x_px, detector_data[detector_name][i+1].y_px], - [detector_data[detector_name][i+2].x_px, detector_data[detector_name][i+2].y_px], - [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] - marker_corners = MarkerCorners( + self.pose_solver.add_detector_frame( detector_label=detector_name, - marker_id=int(detector_data[detector_name][i].feature_label), - points=corners_list, - timestamp=timestamp) - self.pose_solver.add_marker_corners([marker_corners]) + frame_annotations=detector_data[detector_name], + frame_timestamp_utc=timestamp) - target_poses = self.pose_solver.get_target_poses() + _, target_poses = self.pose_solver.get_poses() self.target_poses = target_poses for pose in target_poses: if pose.target_id not in list(self._index_to_marker_uuid.values()): @@ -249,19 +245,12 @@ def locate_reference_board(self, detector_data: dict[str, list[Annotation]]): timestamp = datetime.datetime.now(tz=datetime.timezone.utc) for detector_name in detector_data: for i in range(0, len(detector_data[detector_name]), 4): - corners_list: list[list[float]] = [ # Indexed as [point][coordinate] - [detector_data[detector_name][i].x_px, detector_data[detector_name][i].y_px], - [detector_data[detector_name][i+1].x_px, detector_data[detector_name][i+1].y_px], - [detector_data[detector_name][i+2].x_px, detector_data[detector_name][i+2].y_px], - [detector_data[detector_name][i+3].x_px, detector_data[detector_name][i+3].y_px]] - marker_corners = MarkerCorners( + self.pose_solver.add_detector_frame( detector_label=detector_name, - marker_id=int(detector_data[detector_name][i].feature_label), - points=corners_list, - timestamp=timestamp) - self.pose_solver.add_marker_corners([marker_corners]) + frame_annotations=detector_data[detector_name], + frame_timestamp_utc=timestamp) - new_detector_poses = self.pose_solver.get_detector_poses() + new_detector_poses, _ = self.pose_solver.get_poses() for pose in new_detector_poses: if pose.target_id not in self._detector_poses_median: self._detector_poses_median[pose.target_id] = PoseLocation(pose.target_id) @@ -269,12 +258,15 @@ 
def locate_reference_board(self, detector_data: dict[str, list[Annotation]]): pose.object_to_reference_matrix.as_numpy_array(), timestamp.isoformat()) for label in self._detector_poses_median: - pose = Pose( + detector_to_reference: Matrix4x4 = \ + self._detector_poses_median[label].get_median_pose().object_to_reference_matrix + self.pose_solver.set_extrinsic_matrix( + detector_label=label, + transform_to_reference=detector_to_reference) + self.detector_poses.append(Pose( target_id=label, - object_to_reference_matrix=self._detector_poses_median[label].get_median_pose().object_to_reference_matrix, - solver_timestamp_utc_iso8601=timestamp.isoformat()) - self.detector_poses.append(pose) - self.pose_solver.set_detector_poses(self.detector_poses) + object_to_reference_matrix=detector_to_reference, + solver_timestamp_utc_iso8601=timestamp.isoformat())) def collect_data(self, detector_data: dict[str, list[Annotation]]): """ Collects data of relative position and is entered in matrix. Returns a dictionary of its corners""" diff --git a/src/board_builder/utils/graph_search.py b/src/board_builder/graph_search.py similarity index 100% rename from src/board_builder/utils/graph_search.py rename to src/board_builder/graph_search.py diff --git a/src/board_builder/structures.py b/src/board_builder/structures.py index 8385a74..9b5e603 100644 --- a/src/board_builder/structures.py +++ b/src/board_builder/structures.py @@ -1,5 +1,4 @@ -from src.common.util import MathUtils -from src.common.structures import Matrix4x4, Pose +from src.common import MathUtils, Matrix4x4, Pose import abc import datetime import numpy as np @@ -7,59 +6,6 @@ from scipy.spatial.transform import Rotation as R -# TODO: Merge into a similar structure in common -class MarkerCorners: - detector_label: str - marker_id: int - points: list[list[float]] - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - marker_id: int, - points: list[list[float]], - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.marker_id = marker_id - self.points = points - self.timestamp = timestamp - - -class MarkerRaySet(BaseModel): - marker_id: int = Field() - image_points: list[list[float]] = Field() # image positions of marker corners. Size 4. 
- image_timestamp: datetime.datetime = Field() - ray_origin_reference: list[float] = Field() # Shared origin for all rays (same detector) - ray_directions_reference: list[list[float]] = Field() # Size 4 (one for each image point) - detector_label: str = Field() - detector_to_reference_matrix: Matrix4x4 = Field() - - @staticmethod - def age_seconds( - marker_ray_set, - query_timestamp: datetime.datetime - ): - return (query_timestamp - marker_ray_set.image_timestamp).total_seconds() - - @staticmethod - def newest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.now() - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value - - @staticmethod - def oldest_timestamp_in_list(marker_ray_set_list: list) -> datetime.datetime: - return_value = datetime.datetime.utcfromtimestamp(0) - for ray_set in marker_ray_set_list: - if ray_set.image_timestamp > return_value: - return_value = ray_set.image_timestamp - return return_value - - class MatrixNode: def __init__(self, node_id: str): self.id = node_id @@ -71,26 +17,6 @@ def add_neighbour(self, neighbour_node, weight: int): self.weights[neighbour_node.id] = weight -# TODO: Merge/replace this with pose under common data structures -class PoseData(BaseModel): - target_id: str = Field() - object_to_reference_matrix: Matrix4x4 = Field() - ray_sets: list[MarkerRaySet] - - def newest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.newest_timestamp_in_list(self.ray_sets) - - def oldest_timestamp(self) -> datetime.datetime: - return MarkerRaySet.oldest_timestamp_in_list(self.ray_sets) - - @staticmethod - def age_seconds( - pose, - query_timestamp: datetime.datetime - ) -> float: - return (query_timestamp - pose.oldest_timestamp()).total_seconds() - - class PoseLocation: _id: str @@ -191,19 +117,6 @@ def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: ... def get_points(self) -> list[list[float]]: ... 
-class TargetMarker(TargetBase, Marker): - def get_marker_ids(self) -> list[str]: - return [self.marker_id] - - def get_points(self) -> list[list[float]]: - return self.get_points_internal() - - def get_points_for_marker_id(self, marker_id: str) -> list[list[float]]: - if marker_id != self.marker_id: - raise IndexError(f"marker_id {marker_id} is not in target {self.label}") - return self.get_points_internal() - - class TargetBoard(TargetBase): markers: list[Marker] = Field() _marker_dict: None | dict[str, Marker] = PrivateAttr() diff --git a/src/board_builder/test/graph_search_test.py b/src/board_builder/test/graph_search_test.py index 67c7245..c9d6fba 100644 --- a/src/board_builder/test/graph_search_test.py +++ b/src/board_builder/test/graph_search_test.py @@ -1,7 +1,7 @@ import datetime from src.common import Matrix4x4 from src.board_builder.structures import PoseLocation -from src.board_builder.utils.graph_search import create_graph, bfs_shortest_path, get_transform_from_root +from src.board_builder.graph_search import create_graph, bfs_shortest_path, get_transform_from_root """ diff --git a/src/board_builder/utils/__init__.py b/src/board_builder/utils/__init__.py deleted file mode 100644 index ea42dcb..0000000 --- a/src/board_builder/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from src.board_builder.utils.board_builder_pose_solver import BoardBuilderPoseSolver diff --git a/src/board_builder/utils/board_builder_pose_solver.py b/src/board_builder/utils/board_builder_pose_solver.py deleted file mode 100644 index b5b90c9..0000000 --- a/src/board_builder/utils/board_builder_pose_solver.py +++ /dev/null @@ -1,771 +0,0 @@ -from src.board_builder.structures import \ - MarkerCorners, \ - MarkerRaySet, \ - PoseData, \ - PoseLocation -from src.common import \ - IntrinsicParameters, \ - IterativeClosestPointParameters, \ - MathUtils, \ - Matrix4x4, \ - Pose, \ - Ray -from src.board_builder.structures import TargetBase, TargetMarker -from src.implementations.common_aruco_opencv import ArucoOpenCVCommon -from src.pose_solver.structures import PoseSolverParameters -import cv2 -import cv2.aruco -import datetime -import numpy -from scipy.spatial.transform import Rotation -from typing import Callable, TypeVar -import uuid - - -KeyType = TypeVar("KeyType") -ValueType = TypeVar("ValueType") - - -class ImagePointSetsKey: - detector_label: str - timestamp: datetime.datetime - - def __init__( - self, - detector_label: str, - timestamp: datetime.datetime - ): - self.detector_label = detector_label - self.timestamp = timestamp - - def _key(self): - return self.detector_label, self.timestamp - - def __eq__(self, other): - if isinstance(other, ImagePointSetsKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - -class MarkerKey: - detector_label: str - marker_id: str - - def __init__( - self, - detector_label: str, - marker_id: str - ): - self.detector_label = detector_label - self.marker_id = marker_id - - def _key(self): - return self.detector_label, self.marker_id - - def __eq__(self, other): - if isinstance(other, MarkerKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - def __str__(self): - return str("(" + self.detector_label + "," + str(self.marker_id) + ")") - - -class CornerSetReference: - marker_id: str - corners: list[list[float]] # in reference coordinate system - ray_sets: list[MarkerRaySet] - - def __init__( - self, - marker_id: str, - corners: list[list[float]], - ray_sets: 
list[MarkerRaySet] - ): - self.marker_id = marker_id - self.corners = corners - self.ray_sets = ray_sets - - -class TargetDepthKey: - target_id: uuid.UUID - detector_label: str - - def __init__( - self, - target_id: uuid.UUID, - detector_label: str - ): - self.target_id = target_id - self.detector_label = detector_label - - def _key(self): - return self.target_id, self.detector_label - - def __eq__(self, other): - if isinstance(other, TargetDepthKey): - return self._key() == other._key() - return False - - def __hash__(self): - return hash(self._key()) - - -class TargetDepth: - target_id: uuid.UUID - detector_label: str - estimate_timestamp: datetime.datetime - depth: float - - def __init__( - self, - target_id: uuid.UUID, - detector_label: str, - estimate_timestamp: datetime.datetime, - depth: float - ): - self.target_id = target_id - self.detector_label = detector_label - self.estimate_timestamp = estimate_timestamp - self.depth = depth - - @staticmethod - def age_seconds( - target_depth, - query_timestamp: datetime.datetime - ): - return (query_timestamp - target_depth.estimate_timestamp).total_seconds() - - -class BoardBuilderPoseSolver: - """ - Class containing the actual "solver" logic, kept separate from the API. - """ - - _intrinsics_by_detector_label: dict[str, IntrinsicParameters] - _targets: dict[uuid.UUID, TargetBase] - - _marker_corners_since_update: list[MarkerCorners] - - _marker_rayset_by_marker_key: dict[MarkerKey, MarkerRaySet] - - _alpha_poses_by_target_id: dict[uuid.UUID, list[PoseData]] - _target_extrapolation_poses_by_target_id: dict[uuid.UUID, list[PoseData]] - _poses_by_target_id: dict[uuid.UUID, PoseData] - _poses_by_detector_label: dict[str, Matrix4x4] - _target_depths_by_target_depth_key: dict[TargetDepthKey, list[TargetDepth]] - _poses_average_by_detector_label: dict[str, PoseLocation] - _detector_poses: list[Pose] - - _minimum_marker_age_before_removal_seconds: float - - _board_marker_ids: list[int] - _board_marker_positions: list[list[float]] - _board_marker_size: int - - def __init__(self): - - self._intrinsics_by_detector_label = dict() - self._parameters = PoseSolverParameters() - self._targets = dict() - self._marker_corners_since_update = list() - self._marker_rayset_by_marker_key = dict() - self._alpha_poses_by_target_id = dict() - self._target_extrapolation_poses_by_target_id = dict() - self._poses_by_target_id = dict() - self._poses_by_detector_label = dict() - self._target_depths_by_target_depth_key = dict() - self._poses_average_by_detector_label = dict() - self._detector_poses = list() - - self._minimum_marker_age_before_removal_seconds = max([ - self._parameters.POSE_DETECTOR_DENOISE_LIMIT_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS, - self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS, - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS]) - - self._charuco_board = ArucoOpenCVCommon.CharucoBoard() - self._board_marker_ids = self._charuco_board.get_marker_ids() - self._board_marker_positions = self._charuco_board.get_marker_center_points() - self._board_marker_size = 10 - - def add_marker_corners( - self, - detected_corners: list[MarkerCorners] - ) -> None: - self._marker_corners_since_update += detected_corners - - def add_target_marker( - self, - marker_id: int, - ) -> bool: - for target_id, target in self._targets.items(): - if isinstance(target, TargetMarker) and marker_id == target.marker_id: - return False - target: 
TargetBase = TargetMarker( - label=str(marker_id), - marker_id=str(marker_id), - marker_size=self._board_marker_size) - target_id: uuid.UUID = uuid.uuid4() - self._targets[target_id] = target - return True - - def get_detector_poses( - self - ) -> list[Pose]: - self._estimate_detector_pose_relative_to_reference() - detector_poses: list[Pose] = [ - Pose( - target_id=detector_label, - object_to_reference_matrix=pose, - solver_timestamp_utc_iso8601=datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) - for detector_label, pose in self._poses_by_detector_label.items()] - return detector_poses - - def get_target_poses( - self - ) -> list[Pose]: - self._estimate_target_pose_relative_to_reference() - target_poses: list[Pose] = [ - Pose( - target_id=str(target_id), - object_to_reference_matrix=pose.object_to_reference_matrix, - solver_timestamp_utc_iso8601=str(pose.newest_timestamp().isoformat())) - for target_id, pose in self._poses_by_target_id.items()] - return target_poses - - def set_intrinsic_parameters( - self, - detector_label: str, - intrinsic_parameters: IntrinsicParameters - ) -> None: - self._intrinsics_by_detector_label[detector_label] = intrinsic_parameters - - def set_detector_poses(self, detector_poses): - self._detector_poses = detector_poses - - def set_board_marker_size(self, board_marker_size): - self._board_marker_size = board_marker_size - - def _calculate_marker_ray_set( - self, - image_point_set: MarkerCorners, - detector_to_reference_matrix: Matrix4x4 - ) -> MarkerRaySet: - undistorted_points_original: numpy.ndarray = numpy.array(image_point_set.points, dtype="float32") - undistorted_points_original = numpy.reshape( - a=undistorted_points_original, - newshape=(1, len(image_point_set.points), 2)) - camera_matrix: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[image_point_set.detector_label].get_matrix()) - distortion_coefficients: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[image_point_set.detector_label].get_distortion_coefficients()) - undistorted_points_normalized = cv2.undistortPoints( - src=undistorted_points_original, - cameraMatrix=camera_matrix, - distCoeffs=distortion_coefficients) - ray_directions: list[list[float]] = list() - origin_point_detector = [0, 0, 0, 1] # origin - detector_to_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array() - ray_origin_reference = numpy.matmul(detector_to_reference, origin_point_detector) - ray_origin_reference = ray_origin_reference.tolist()[0:3] - for point_normalized in undistorted_points_normalized: - target_point_image = [point_normalized[0, 0], point_normalized[0, 1], 1, 1] # reverse perspective - target_point_detector = MathUtils.image_to_opengl_vector(target_point_image) - ray_direction_detector = numpy.subtract(target_point_detector, origin_point_detector) - ray_direction_detector = ray_direction_detector / numpy.linalg.norm(ray_direction_detector) # normalize - ray_direction_reference = numpy.matmul(detector_to_reference, ray_direction_detector) - ray_directions.append(list(ray_direction_reference[0:3])) - return MarkerRaySet( - marker_id=image_point_set.marker_id, - image_points=image_point_set.points, - image_timestamp=image_point_set.timestamp, - detector_label=image_point_set.detector_label, - detector_to_reference_matrix=detector_to_reference_matrix, - ray_origin_reference=ray_origin_reference, - ray_directions_reference=ray_directions) - - def _clear_old_values( - self, - query_timestamp: datetime.datetime - ) -> bool: # whether any dict's have 
changed or not - changed = False - self._marker_rayset_by_marker_key, modified = self._clear_old_values_from_dict( - input_dict=self._marker_rayset_by_marker_key, - age_from_value_function=MarkerRaySet.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._minimum_marker_age_before_removal_seconds) - changed |= modified - self._alpha_poses_by_target_id, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._alpha_poses_by_target_id, - age_from_value_function=PoseData.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_NEAREST_LIMIT_RAY_AGE_SECONDS) - changed |= modified - self._target_extrapolation_poses_by_target_id, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._target_extrapolation_poses_by_target_id, - age_from_value_function=PoseData.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_EXTRAPOLATION_LIMIT_RAY_AGE_SECONDS) - changed |= modified - self._target_depths_by_target_depth_key, modified = self._clear_old_values_from_dict_of_lists( - input_dict=self._target_depths_by_target_depth_key, - age_from_value_function=TargetDepth.age_seconds, - query_timestamp=query_timestamp, - maximum_age_seconds=self._parameters.POSE_SINGLE_CAMERA_DEPTH_LIMIT_AGE_SECONDS) - changed |= modified - return changed - - @staticmethod - def _clear_old_values_from_dict( - input_dict: dict[KeyType, ValueType], - age_from_value_function: Callable[[ValueType, datetime.datetime], float], - query_timestamp: datetime.datetime, - maximum_age_seconds: float - ) -> tuple[dict[KeyType, ValueType], bool]: # modified_dictionary, changes_found - changed: bool = False - output_dict: dict[KeyType, ValueType] = dict() - for input_key, input_value in input_dict.items(): - age_seconds: float = age_from_value_function(input_value, query_timestamp) - if age_seconds <= maximum_age_seconds: - output_dict[input_key] = input_value - else: - changed = True - return output_dict, changed - - @staticmethod - def _clear_old_values_from_dict_of_lists( - input_dict: dict[KeyType, list[ValueType]], - age_from_value_function: Callable[[ValueType, datetime.datetime], float], - query_timestamp: datetime.datetime, - maximum_age_seconds: float - ) -> tuple[dict[KeyType, list[ValueType]], bool]: # modified_dictionary, changes_found - changed: bool = False - output_dict: dict[KeyType, list[ValueType]] = dict() - for input_key in input_dict.keys(): - output_poses_for_label: list[ValueType] = list() - for pose in input_dict[input_key]: - age_seconds: float = age_from_value_function(pose, query_timestamp) - if age_seconds <= maximum_age_seconds: - output_poses_for_label.append(pose) - else: - changed = True - output_dict[input_key] = output_poses_for_label - return output_dict, changed - - def _corresponding_point_list_in_target( - self, - target_id: uuid.UUID - ) -> list[list[float]]: - points = list() - if target_id not in self._targets: - raise RuntimeError(f"Could not find target {str(target_id)} in domain.") - target: TargetBase = self._targets[target_id] - if isinstance(target, TargetMarker): - half_width = target.marker_size / 2.0 - points += [ - [-half_width, half_width, 0.0], - [half_width, half_width, 0.0], - [half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]] - return points - - def _estimate_target_pose_from_ray_set( - self, - target: TargetBase, - ray_set: MarkerRaySet - ) -> tuple[list[float], list[float]]: - corners = 
numpy.array([ray_set.image_points], dtype="float32") - detector_label: str = ray_set.detector_label - camera_matrix: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[detector_label].get_matrix(), dtype="float32") - distortion_coefficients: numpy.ndarray = numpy.array( - self._intrinsics_by_detector_label[detector_label].get_distortion_coefficients(), dtype="float32") - rotation_vector: numpy.ndarray - translation_vector: numpy.ndarray - if isinstance(target, TargetMarker): - half_width = target.marker_size / 2.0 - reference_points: numpy.ndarray = numpy.array([ - [-half_width, half_width, 0.0], - [ half_width, half_width, 0.0], - [ half_width, -half_width, 0.0], - [-half_width, -half_width, 0.0]], - dtype="float32") - reference_points = numpy.reshape(reference_points, newshape=(1, 4, 3)) - corners = numpy.reshape(corners, newshape=(1, 4, 2)) - _, rotation_vector, translation_vector = cv2.solvePnP( - objectPoints=reference_points, - imagePoints=corners, - cameraMatrix=camera_matrix, - distCoeffs=distortion_coefficients) - else: - raise NotImplementedError("Only targets that are boards or markers are supported.") - - rotation_vector = rotation_vector.flatten() - translation_vector = translation_vector.flatten() - object_to_camera_matrix = numpy.identity(4, dtype="float32") - object_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix() - object_to_camera_matrix[0:3, 3] = translation_vector[0:3] - object_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(object_to_camera_matrix) - detector_to_reference_matrix: Matrix4x4 = ray_set.detector_to_reference_matrix - object_to_reference_matrix = numpy.matmul( - detector_to_reference_matrix.as_numpy_array(), object_to_detector_matrix) - position = list(object_to_reference_matrix[0:3, 3]) - quaternion = list(Rotation.from_matrix(object_to_reference_matrix[0:3, 0:3]).as_quat(canonical=True)) - return position, quaternion - - def _estimate_detector_pose_relative_to_reference(self): - image_point_sets_by_image_key, image_point_set_keys_with_reference_visible = self._update() - for image_point_sets_key, image_point_sets in image_point_sets_by_image_key.items(): - detector_label = image_point_sets_key.detector_label - board_image_point_sets = [ - image_point_set for image_point_set in image_point_sets - if image_point_set.marker_id in self._board_marker_ids - ] - - # Create a dictionary to map marker_id to its index in _board_marker_ids - marker_id_to_index = {marker_id: index for index, marker_id in enumerate(self._board_marker_ids)} - - # Sort the board_image_point_sets based on the order in _board_marker_ids - board_image_point_sets.sort(key=lambda x: marker_id_to_index[x.marker_id]) - - intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - - image_points = [] - detected_marker_positions = [] - for image_point_set in board_image_point_sets: - image_points += image_point_set.points - detected_marker_positions.append( - self._board_marker_positions[marker_id_to_index[image_point_set.marker_id]]) - - if len(detected_marker_positions) == 0: - continue # Skip if no markers are detected - - half_width: float = self._board_marker_size / 2.0 - reference_points = [] - for position in detected_marker_positions: - single_reference_points = numpy.array([ - [position[0] + half_width, position[1] - half_width, 0.0], - [position[0] + half_width, position[1] + half_width, 0.0], - [position[0] - half_width, position[1] + half_width, 0.0], - [position[0] - half_width, position[1] 
- half_width, 0.0], - ]) - reference_points.extend(single_reference_points) - - reference_points = numpy.array(reference_points, dtype="float32") - reference_points = numpy.reshape(reference_points, newshape=(1, len(reference_points), 3)) - image_points = numpy.reshape(numpy.array(image_points, dtype="float32"), newshape=(1, len(image_points), 2)) - - reference_found: bool - rotation_vector: numpy.ndarray - translation_vector: numpy.ndarray - reference_found, rotation_vector, translation_vector = cv2.solvePnP( - objectPoints=reference_points, - imagePoints=image_points, - cameraMatrix=numpy.asarray(intrinsics.get_matrix(), dtype="float32"), - distCoeffs=numpy.asarray(intrinsics.get_distortion_coefficients(), dtype="float32")) - if not reference_found: - continue # Camera does not see reference board - - rotation_vector = rotation_vector.flatten() - translation_vector = translation_vector.flatten() - reference_to_camera_matrix = numpy.identity(4, dtype="float32") - reference_to_camera_matrix[0:3, 0:3] = Rotation.from_rotvec(rotation_vector).as_matrix() - reference_to_camera_matrix[0:3, 3] = translation_vector - reference_to_detector_matrix = MathUtils.image_to_opengl_transformation_matrix(reference_to_camera_matrix) - detector_to_reference_opengl = numpy.linalg.inv(reference_to_detector_matrix) - self._poses_by_detector_label[detector_label] = Matrix4x4.from_numpy_array(detector_to_reference_opengl) - - def _estimate_target_pose_relative_to_reference(self): - image_point_sets_by_image_key, image_point_set_keys_with_reference_visible = self._update() - valid_image_point_sets: list[MarkerCorners] = list() - for image_point_sets_key in image_point_set_keys_with_reference_visible: - image_point_sets = image_point_sets_by_image_key[image_point_sets_key] - for image_point_set in image_point_sets: - valid_image_point_sets.append(image_point_set) - - # Calculate rays - # Only the most recent points per detector/marker pair are used, - # so if we process the most recent first, we can detect and - # discard the older point sets and avoid unnecessary processing - valid_image_point_sets.sort(key=lambda x: x.timestamp, reverse=True) - for image_point_set in valid_image_point_sets: - image_timestamp = image_point_set.timestamp - marker_key = MarkerKey( - detector_label=image_point_set.detector_label, - marker_id=str(image_point_set.marker_id)) - if marker_key in self._marker_rayset_by_marker_key: - if self._marker_rayset_by_marker_key[marker_key].image_timestamp > image_timestamp: - continue # A newer timestamp was found in this iteration. Skip the older one. - for pose in self._detector_poses: - if pose.target_id == image_point_set.detector_label: - detector_to_reference_matrix: Matrix4x4 = pose.object_to_reference_matrix - self._marker_rayset_by_marker_key[marker_key] = self._calculate_marker_ray_set( - image_point_set=image_point_set, - detector_to_reference_matrix=detector_to_reference_matrix) - - # Create a dictionary that maps marker ID's to a list of *recent* rays - ray_sets_by_marker_id: dict[str, list[MarkerRaySet]] = dict() - for marker_key, marker_ray_set in self._marker_rayset_by_marker_key.items(): - if (self._now_timestamp - marker_ray_set.image_timestamp).total_seconds() > \ - self._parameters.POSE_MULTI_CAMERA_LIMIT_RAY_AGE_SECONDS: - continue - marker_id = marker_key.marker_id - if marker_id not in ray_sets_by_marker_id: - ray_sets_by_marker_id[marker_id] = list() - ray_sets_by_marker_id[marker_id].append(marker_ray_set) - - # Sort rays by the size of quadrilateral. 
- # Larger marker size in image suggests more precision. - # After a certain number of intersections, - # there may be little point in processing additional (lower precision) ray sets. - for marker_id, ray_set_list in ray_sets_by_marker_id.items(): - ray_set_list.sort(key=lambda x: MathUtils.convex_quadrilateral_area(x.image_points), reverse=True) - ray_sets_by_marker_id[marker_id] = ray_set_list[0:self._parameters.MAXIMUM_RAY_COUNT_FOR_INTERSECTION] - - marker_count_by_marker_id: dict[str, int] = dict() - for marker_id, ray_set_list in ray_sets_by_marker_id.items(): - marker_count_by_marker_id[marker_id] = len(ray_set_list) - intersectable_marker_ids: list[str] = list() - nonintersectable_marker_ids: list[str] = list() - for marker_id, count in marker_count_by_marker_id.items(): - if count >= 2: - intersectable_marker_ids.append(marker_id) - else: - nonintersectable_marker_ids.append(marker_id) - - # intersect rays to find the 3D points for each marker corner in reference coordinates - corner_sets_reference_by_marker_id: dict[str, CornerSetReference] = dict() - rejected_intersection_marker_ids: list[str] = list() - for marker_id in intersectable_marker_ids: - intersections_appear_valid: bool = True # If something looks off, set this to False - ray_set_list: list[MarkerRaySet] = ray_sets_by_marker_id[marker_id] - corner_points_in_reference: list[list[float]] = list() - for corner_index in range(0, 4): - rays: list[Ray] = list() - if len(ray_set_list) == 0: - intersections_appear_valid = False - print("Warning: intersectable_marker_ids corresponds to no ray set list") - break - - for ray_set in ray_set_list: - rays.append(Ray( - source_point=ray_set.ray_origin_reference, - direction=ray_set.ray_directions_reference[corner_index])) - intersection_result = MathUtils.closest_intersection_between_n_lines( - rays=rays, - maximum_distance=self._parameters.INTERSECTION_MAXIMUM_DISTANCE) - if intersection_result.centroids.shape[0] == 0: - intersections_appear_valid = False - break - else: - corner_points_in_reference.append(intersection_result.centroid().tolist()) - if not intersections_appear_valid: - rejected_intersection_marker_ids.append(marker_id) - continue - corner_sets_reference_by_marker_id[marker_id] = CornerSetReference( - marker_id=marker_id, - corners=corner_points_in_reference, - ray_sets=ray_set_list) - - # We estimate the pose of each target based on the calculated intersections - # and the rays projected from each detector - for target_id, target in self._targets.items(): - marker_ids_in_target: list[str] - if isinstance(target, TargetMarker): - marker_ids_in_target = [target.marker_id] - else: - raise NotImplementedError("Only targets that are markers are supported.") - - marker_ids_with_intersections: list[str] = list() - marker_ids_with_rays: list[str] = list() - for marker_id in marker_ids_in_target: - if marker_id in corner_sets_reference_by_marker_id: - marker_ids_with_intersections.append(marker_id) - elif marker_id in ray_sets_by_marker_id: # Don't include if we have (presumably precise) intersections - marker_ids_with_rays.append(marker_id) - - if len(marker_ids_with_intersections) <= 0 and len(marker_ids_with_rays) <= 0: - continue # No information on which to base a pose - - # Determine how many markers and how many detectors are involved - marker_id_set: set[str] = set() - one_detector_only: bool = True - detector_set: set[str] = set() - ray_sets: list[MarkerRaySet] = list() - for marker_id in marker_ids_with_intersections: - 
marker_id_set.add(corner_sets_reference_by_marker_id[marker_id].marker_id) - ray_sets += corner_sets_reference_by_marker_id[marker_id].ray_sets - one_detector_only = False - for marker_id in marker_ids_with_rays: - marker_id_set.add(marker_id) - ray_sets += ray_sets_by_marker_id[marker_id] - assert (len(marker_id_set) > 0) - one_marker_only: bool = len(marker_id_set) == 1 - - for ray_set in ray_sets: - detector_set.add(ray_set.detector_label) - one_detector_only &= (len(detector_set) == 1) - - # Try to find a solution for this matrix - object_to_reference_matrix: numpy.array = numpy.identity(4, dtype="float32") - - if one_detector_only and one_marker_only: - marker_id = marker_ids_with_rays[0] - ray_set = ray_sets_by_marker_id[marker_id][0] - position, orientation = self._estimate_target_pose_from_ray_set( - target=target, - ray_set=ray_set) - object_to_reference_matrix[0:3, 3] = position - object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(orientation).as_matrix() - - else: - # Fill in the required variables for the customized iterative closest point - initial_object_to_reference_estimated: bool = False - initial_object_to_reference_matrix = numpy.identity(4, dtype="float32") - object_known_points: list[list[float]] = list() - reference_known_points: list[list[float]] = list() - object_ray_points: list[list[float]] = list() - reference_rays: list[Ray] = list() - iterative_closest_point_parameters = IterativeClosestPointParameters( - termination_iteration_count=self._parameters.icp_termination_iteration_count, - termination_delta_translation=self._parameters.icp_termination_translation, - termination_delta_rotation_radians=self._parameters.icp_termination_rotation_radians, - termination_mean_point_distance=self._parameters.icp_termination_mean_point_distance, - termination_rms_point_distance=self._parameters.icp_termination_rms_point_distance) - - if len(marker_ids_with_intersections) >= 1: - reference_points_for_intersections: list[list[float]] = list() - for marker_id in marker_ids_with_intersections: - corner_set_reference = corner_sets_reference_by_marker_id[marker_id] - reference_points_for_intersections += corner_set_reference.corners - object_points_for_intersections = self._corresponding_point_list_in_target(target_id=target_id) - object_known_points += object_points_for_intersections - reference_known_points += reference_points_for_intersections - initial_object_to_reference_matrix = MathUtils.register_corresponding_points( - point_set_from=object_points_for_intersections, - point_set_to=reference_points_for_intersections) - initial_object_to_reference_estimated = True - - # pose estimation based on ArUco directly, used *only* for initial pose estimation - estimated_positions: list[list[float]] = list() - estimated_orientations: list[list[float]] = list() # quaternions - for marker_id in marker_ids_with_rays: - ray_set_list = ray_sets_by_marker_id[marker_id] - for ray_set in ray_set_list: - assert (len(ray_set.ray_directions_reference) == 4) - reference_rays_for_set: list[Ray] = list() - for corner_index in range(0, 4): - reference_rays_for_set.append(Ray( - source_point=ray_set.ray_origin_reference, - direction=ray_set.ray_directions_reference[corner_index])) - reference_rays += reference_rays_for_set - object_points_for_set = self._corresponding_point_list_in_target(target_id=target_id) - object_ray_points += object_points_for_set - if not initial_object_to_reference_estimated: - position, orientation = self._estimate_target_pose_from_ray_set(target, ray_set) - 
estimated_positions.append(position) - estimated_orientations.append(orientation) - if not initial_object_to_reference_estimated: - mean_position = numpy.array([0.0, 0.0, 0.0]) - for position in estimated_positions: - mean_position += position - mean_position /= len(estimated_positions) - initial_object_to_reference_matrix[0:3, 3] = mean_position - mean_orientation = MathUtils.average_quaternion(estimated_orientations) - initial_object_to_reference_matrix[0:3, 0:3] = Rotation.from_quat(mean_orientation).as_matrix() - - icp_output = MathUtils.iterative_closest_point_for_points_and_rays( - source_known_points=object_known_points, - target_known_points=reference_known_points, - source_ray_points=object_ray_points, - target_rays=reference_rays, - initial_transformation_matrix=initial_object_to_reference_matrix, - parameters=iterative_closest_point_parameters) - object_to_reference_matrix = icp_output.source_to_target_matrix.as_numpy_array() - - # Compute a depth from each detector, - # find newest ray_set for each detector - newest_ray_set_by_detector_label: dict[str, MarkerRaySet] = dict() - for ray_set in ray_sets: - detector_label = ray_set.detector_label - if detector_label not in newest_ray_set_by_detector_label: - newest_ray_set_by_detector_label[detector_label] = ray_set - elif ray_set.image_timestamp > newest_ray_set_by_detector_label[detector_label].image_timestamp: - newest_ray_set_by_detector_label[detector_label] = ray_set - # Record depth - for detector_label in newest_ray_set_by_detector_label: - newest_ray_set = newest_ray_set_by_detector_label[detector_label] - target_depth_key = TargetDepthKey(target_id=target_id, detector_label=detector_label) - if target_depth_key not in self._target_depths_by_target_depth_key: - self._target_depths_by_target_depth_key[target_depth_key] = list() - detector_to_reference_matrix: Matrix4x4 = newest_ray_set.detector_to_reference_matrix - detector_position_reference: numpy.ndarray = detector_to_reference_matrix.as_numpy_array()[0:3, 3] - object_position_reference: numpy.array = object_to_reference_matrix[0:3, 3] - depth = numpy.linalg.norm(object_position_reference - detector_position_reference) - target_depth = TargetDepth( - target_id=target_id, - detector_label=detector_label, - estimate_timestamp=newest_ray_set.image_timestamp, - depth=depth) - self._target_depths_by_target_depth_key[target_depth_key].append(target_depth) - # If only visible to one camera, use the depth to denoise - if one_detector_only: - detector_label = detector_set.pop() - detector_to_reference_matrix: Matrix4x4 = \ - newest_ray_set_by_detector_label[detector_label].detector_to_reference_matrix - detector_position_reference = detector_to_reference_matrix.as_numpy_array()[0:3, 3] - target_position_reference = object_to_reference_matrix[0:3, 3] - depth_vector_reference = target_position_reference - detector_position_reference - old_depth = float(numpy.linalg.norm(depth_vector_reference)) - target_depth_key = TargetDepthKey(target_id=target_id, detector_label=detector_label) - new_depth = float(numpy.average( - [target_depth.depth for target_depth in - self._target_depths_by_target_depth_key[ - target_depth_key]])) + self._parameters.POSE_SINGLE_CAMERA_DEPTH_CORRECTION - depth_factor = new_depth / old_depth - object_to_reference_matrix[0:3, 3] = detector_position_reference + depth_factor * depth_vector_reference - - pose = PoseData( - target_id=str(target_id), - object_to_reference_matrix=Matrix4x4.from_numpy_array(object_to_reference_matrix), - ray_sets=ray_sets) - - if 
target_id not in self._target_extrapolation_poses_by_target_id: - self._target_extrapolation_poses_by_target_id[target_id] = list() - self._target_extrapolation_poses_by_target_id[target_id].append(pose) - - self._poses_by_target_id[target_id] = pose - - def _update(self): - now_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) - self._now_timestamp = now_timestamp - poses_need_update: bool = self._clear_old_values(now_timestamp) - poses_need_update |= len(self._marker_corners_since_update) > 0 - if not poses_need_update: - return - - self._poses_by_target_id.clear() - - image_point_sets_by_image_key: dict[ImagePointSetsKey, list[MarkerCorners]] = dict() - for marker_corners in self._marker_corners_since_update: - detector_label = marker_corners.detector_label - image_point_sets_key = ImagePointSetsKey(detector_label, marker_corners.timestamp) - if image_point_sets_key not in image_point_sets_by_image_key: - image_point_sets_by_image_key[image_point_sets_key] = list() - image_point_sets_by_image_key[image_point_sets_key].append(marker_corners) - self._marker_corners_since_update.clear() - - image_point_set_keys_with_reference_visible: list[ImagePointSetsKey] = list() - for image_point_sets_key, image_point_sets in image_point_sets_by_image_key.items(): - image_point_set_keys_with_reference_visible.append(image_point_sets_key) - - return image_point_sets_by_image_key, image_point_set_keys_with_reference_visible - diff --git a/src/controller/connection.py b/src/controller/connection.py index 41596c1..91bc189 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -18,7 +18,7 @@ StatusMessage, \ Target, \ TimestampGetResponse -from src.detector.api import \ +from src.detector import \ IntrinsicCalibrationCalculateResponse, \ IntrinsicCalibrationImageAddResponse, \ IntrinsicCalibrationImageGetResponse, \ @@ -37,11 +37,11 @@ DetectorStopRequest, \ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest -from src.pose_solver.api import \ +from src.mixer import \ PoseSolverGetPosesResponse, \ PoseSolverSetTargetsRequest, \ - PoseSolverStartRequest, \ - PoseSolverStopRequest + MixerStartRequest, \ + MixerStopRequest import abc import datetime from enum import StrEnum @@ -692,10 +692,10 @@ def __init__( self.recording = [] def create_deinitialization_request_series(self) -> MCTRequestSeries: - return MCTRequestSeries(series=[PoseSolverStopRequest()]) + return MCTRequestSeries(series=[MixerStopRequest()]) def create_initialization_request_series(self) -> MCTRequestSeries: - series: list[MCTRequest] = [PoseSolverStartRequest()] + series: list[MCTRequest] = [MixerStartRequest()] if self.configured_targets is not None: series.append(PoseSolverSetTargetsRequest(targets=self.configured_targets)) return MCTRequestSeries(series=series) diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 647a212..23f61fc 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -32,13 +32,13 @@ Detector, \ DetectorFrameGetRequest, \ DetectorFrameGetResponse -from src.pose_solver import \ - MixerBackend, \ +from src.mixer import \ + Mixer, \ + MixerUpdateIntrinsicParametersRequest, \ PoseSolverAddDetectorFrameRequest, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ - PoseSolverSetExtrinsicRequest, \ - PoseSolverSetIntrinsicRequest + PoseSolverSetExtrinsicRequest import datetime from enum import IntEnum, StrEnum import hjson @@ -58,7 +58,7 @@ _ROLE_LABEL: Final[str] = "controller" _SUPPORTED_ROLES: 
Final[list[str]] = [ Detector.get_role_label(), - MixerBackend.get_role_label()] + Mixer.get_role_label()] _TIME_SYNC_SAMPLE_MAXIMUM_COUNT: Final[int] = 5 @@ -171,7 +171,7 @@ def add_connection( return_value: DetectorConnection = DetectorConnection(component_address=component_address) self._connections[label] = return_value return return_value - elif component_address.role == MixerBackend.get_role_label(): + elif component_address.role == Mixer.get_role_label(): return_value: PoseSolverConnection = PoseSolverConnection(component_address=component_address) self._connections[label] = return_value return return_value @@ -252,7 +252,7 @@ def _advance_startup_state(self) -> None: message=f"Failed to find DetectorConnection with label {detector_label}.") continue if detector_connection.current_intrinsic_parameters is not None: - requests.append(PoseSolverSetIntrinsicRequest( + requests.append(MixerUpdateIntrinsicParametersRequest( detector_label=detector_label, intrinsic_parameters=detector_connection.current_intrinsic_parameters)) if detector_connection.configured_transform_to_reference is not None: @@ -284,7 +284,7 @@ def get_active_pose_solver_labels(self) -> list[str]: """ See get_component_labels. """ - return self.get_component_labels(role=MixerBackend.get_role_label(), active=True) + return self.get_component_labels(role=Mixer.get_role_label(), active=True) def get_component_labels( self, @@ -589,11 +589,11 @@ def recording_stop(self): # Do not record if specified if report.role == Detector.get_role_label() and not self._recording_detector: continue - if report.role == MixerBackend.get_role_label() and not self._recording_pose_solver: + if report.role == Mixer.get_role_label() and not self._recording_pose_solver: continue - if self._recording_save_path is not None: - frames_dict = [frame.dict() for frame in connection.recording] + if isinstance(connection, DetectorConnection) and self._recording_save_path is not None: + frames_dict = [frame.model_dump() for frame in connection.recording] frames_json = json.dumps(frames_dict) with open(os.path.join(self._recording_save_path, report.role+"_log.json"), 'w') as f: f.write(frames_json) @@ -694,7 +694,7 @@ def start_up( raise RuntimeError("Cannot start up if controller isn't first stopped.") for connection in self._connections.values(): if mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == MixerBackend.get_role_label(): + connection.get_role() == Mixer.get_role_label(): continue connection.start_up() @@ -732,7 +732,7 @@ def update( all_connected: bool = True for connection in connections: if self._startup_mode == StartupMode.DETECTING_ONLY and \ - connection.get_role() == MixerBackend.get_role_label(): + connection.get_role() == Mixer.get_role_label(): continue if not connection.is_start_up_finished(): all_connected = False From 2da8ddf7b7c41059739682dd5e24e03de81da7f7 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 26 Aug 2025 17:17:51 -0400 Subject: [PATCH 23/33] ENH: Add extrinsic calibration panel to GUI --- src/common/calibration.py | 10 +- src/common/camera.py | 5 +- src/common/image_processing.py | 32 + src/detector/api.py | 1 + src/detector/detector.py | 9 +- src/gui/gui.py | 27 +- src/gui/panels/__init__.py | 3 +- src/gui/panels/extrinsics_panel.py | 662 ++++++++++++++++++ ...alibrator_panel.py => intrinsics_panel.py} | 8 +- .../specialized/calibration_image_table.py | 2 +- .../specialized/calibration_result_table.py | 2 +- src/mixer/__init__.py | 23 +- 12 files changed, 756 insertions(+), 28 deletions(-) 
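The src/common/image_processing.py hunk below adds a grid-partitioning helper, ImageUtils.partition_rect, which the new extrinsics panel uses to tile one live preview per detector. As a quick illustration of the intended behaviour, here is a self-contained sketch of the same partitioning logic (the name partition_rect and the (width_px, height_px) / (x_px, y_px) tuple conventions come from the diff; the sketch itself is an approximation, not the project's code):

# Standalone sketch of the grid partitioning used to tile detector previews.
# partition_rect_sketch is a stand-in; the real helper is ImageUtils.partition_rect below.
def partition_rect_sketch(
        available_size_px: tuple[int, int],   # (width, height) of the drawing area
        partition_count: int
) -> tuple[tuple[int, int], list[tuple[int, int]]]:
    # Grow the grid alternately in width and height until it has enough cells.
    width_cells: int = 1
    height_cells: int = 1
    while width_cells * height_cells < partition_count:
        width_cells += 1
        if width_cells * height_cells >= partition_count:
            break
        height_cells += 1
    cell_size_px = (available_size_px[0] // width_cells, available_size_px[1] // height_cells)
    positions_px: list[tuple[int, int]] = []
    for cell_index in range(partition_count):
        x_cell = cell_index % width_cells
        y_cell = cell_index // width_cells
        positions_px.append((x_cell * cell_size_px[0], y_cell * cell_size_px[1]))
    return cell_size_px, positions_px

if __name__ == "__main__":
    # Three detectors in a 640x480 preview area: a 2x2 grid of 320x240 cells,
    # with previews placed at (0, 0), (320, 0) and (0, 240).
    print(partition_rect_sketch((640, 480), 3))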
create mode 100644 src/gui/panels/extrinsics_panel.py rename src/gui/panels/{calibrator_panel.py => intrinsics_panel.py} (99%) diff --git a/src/common/calibration.py b/src/common/calibration.py index b777ae8..0a04503 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -1,5 +1,3 @@ -from urllib import request - from .image_processing import \ ImageFormat, \ ImageResolution, \ @@ -80,7 +78,7 @@ class _ImageMetadata(BaseModel): filepath: str = Field() detector_label: str = Field() resolution: ImageResolution = Field() - image_label: str = Field(default_factory=str) # human-readable label + label: str = Field(default_factory=str) # human-readable label timestamp_utc_iso8601: str = Field( default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) state: _ImageState = Field(default=_ImageState.SELECT) @@ -99,7 +97,7 @@ class _ResultMetadata(BaseModel): identifier: str = Field() filepath: str = Field() resolution: ImageResolution | None = Field(default=None) # Used in intrinsic, not currently used in extrinsic - result_label: str = Field(default_factory=str) + label: str = Field(default_factory=str) timestamp_utc_iso8601: str = Field( default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc).isoformat()) image_identifiers: list[str] = Field(default_factory=list) @@ -425,7 +423,7 @@ def update_image_metadata( "It may be prudent to either manually correct it, or recreate it.") matched_metadata.state = image_state if image_label is not None: - matched_metadata.image_label = image_label + matched_metadata.label = image_label self._save_data_ledger() # noinspection DuplicatedCode @@ -453,7 +451,7 @@ def update_result_metadata( "Please manually correct it, or recreate it.") matched_metadata.state = result_state if result_label is not None: - matched_metadata.result_label = result_label + matched_metadata.label = result_label self._save_data_ledger() diff --git a/src/common/camera.py b/src/common/camera.py index 1a8c333..8481697 100644 --- a/src/common/camera.py +++ b/src/common/camera.py @@ -71,8 +71,9 @@ def get_encoded_image( self, image_format: ImageFormat, requested_resolution: ImageResolution | None # None means to not alter the image dimensions - ) -> str: + ) -> tuple[str, ImageResolution]: # ID, original_resolution image: numpy.ndarray = self.get_image() + original_resolution: ImageResolution = ImageResolution(x_px=image.shape[1], y_px=image.shape[0]) if requested_resolution is not None: image = cv2.resize(src=image, dsize=(requested_resolution.x_px, requested_resolution.y_px)) encoded_frame: bool @@ -81,7 +82,7 @@ def get_encoded_image( encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() # noinspection PyTypeChecker encoded_image_rgb_base64: str = base64.b64encode(encoded_image_rgb_bytes) - return encoded_image_rgb_base64 + return encoded_image_rgb_base64, original_resolution def get_status(self) -> Status: return self._status diff --git a/src/common/image_processing.py b/src/common/image_processing.py index c6a4533..cf1aa69 100644 --- a/src/common/image_processing.py +++ b/src/common/image_processing.py @@ -174,6 +174,38 @@ def image_to_bytes( encoded_image_rgb_bytes: bytes = encoded_image_rgb_single_row.tobytes() return encoded_image_rgb_bytes + @staticmethod + def partition_rect( + available_size_px: tuple[int, int], # x, y + partition_count: int + ) -> tuple[tuple[int, int], list[tuple[int, int]]]: # ((width_px, height_px), [(x_px, y_px)]) + """ + Partition a rectangular area into a grid, and return the 
rectangle definitions corresponding to the cells.
+        """
+        width_cells: int = 1
+        height_cells: int = 1
+        cell_count: int = width_cells * height_cells
+        max_cell_count: int = 1000  # I don't think we'll ever get this high, but I want a theoretical iteration limit
+        while cell_count < partition_count and cell_count <= max_cell_count:
+            width_cells += 1
+            cell_count = width_cells * height_cells
+            if cell_count >= partition_count:
+                break
+            height_cells += 1
+            cell_count = width_cells * height_cells
+            if cell_count >= partition_count:
+                break
+        width_px: int = available_size_px[0] // width_cells
+        height_px: int = available_size_px[1] // height_cells
+        positions_px: list[tuple[int, int]] = list()
+        for cell_index in range(0, partition_count):
+            y_cell = cell_index // width_cells
+            x_cell = cell_index % width_cells
+            y_px = y_cell * height_px
+            x_px = x_cell * width_px
+            positions_px.append((x_px, y_px))
+        return (width_px, height_px), positions_px
+
     @staticmethod
     def scale_factor_for_available_space_px(
         source_resolution_px: tuple[int, int],
diff --git a/src/detector/api.py b/src/detector/api.py
index 319bf80..e45e6b1 100644
--- a/src/detector/api.py
+++ b/src/detector/api.py
@@ -70,6 +70,7 @@ def type_identifier() -> str:
     format: ImageFormat = Field()
     image_base64: str = Field()
+    original_resolution: ImageResolution = Field()
 class CameraParametersGetRequest(MCTRequest):
diff --git a/src/detector/detector.py b/src/detector/detector.py
index 933b2fc..f40d089 100644
--- a/src/detector/detector.py
+++ b/src/detector/detector.py
@@ -165,7 +165,8 @@ def calibration_image_add(
         **_kwargs
     ) -> IntrinsicCalibrationImageAddResponse | ErrorResponse:
         try:
-            image_base64: str = self._camera.get_encoded_image(
+            image_base64: str
+            image_base64, _ = self._camera.get_encoded_image(
                 image_format=ImageFormat.FORMAT_PNG,
                 requested_resolution=None)
             image_identifier: str = self._calibrator.add_image(image_base64=image_base64)
@@ -310,15 +311,17 @@ def camera_image_get(
             key="request",
             arg_type=CameraImageGetRequest)
         encoded_image_base64: str
+        original_resolution: ImageResolution
         try:
-            encoded_image_base64 = self._camera.get_encoded_image(
+            encoded_image_base64, original_resolution = self._camera.get_encoded_image(
                 image_format=request.format,
                 requested_resolution=request.requested_resolution)
         except MCTCameraRuntimeError as e:
             return ErrorResponse(message=e.message)
         return CameraImageGetResponse(
             format=request.format,
-            image_base64=encoded_image_base64)
+            image_base64=encoded_image_base64,
+            original_resolution=original_resolution)
     def camera_parameters_get(
         self,
diff --git a/src/gui/gui.py b/src/gui/gui.py
index 6cefa10..23cdda9 100644
--- a/src/gui/gui.py
+++ b/src/gui/gui.py
@@ -1,7 +1,8 @@ from src.gui.panels import \
     BasePanel, \
     BoardBuilderPanel, \
-    CalibratorPanel, \
+    IntrinsicsPanel, \
+    ExtrinsicsPanel, \
     ControllerPanel, \
     DetectorPanel, \
     PoseSolverPanel
@@ -14,7 +15,8 @@ CONTROLLER_LABEL: Final[str] = "Controller"
 DETECTOR_LABEL: Final[str] = "Detector"
-CALIBRATOR_LABEL: Final[str] = "Calibrator"
+INTRINSIC_CALIBRATOR_LABEL: Final[str] = "Intrinsic Calibrator"
+EXTRINSIC_CALIBRATOR_LABEL: Final[str] = "Extrinsic Calibrator"
 BOARD_BUILDER_LABEL: Final[str] = "Board Builder"
 POSE_SOLVER_LABEL: Final[str] = "Pose Solver"
@@ -28,7 +30,8 @@ class ControllerFrame(wx.Frame):
     _notebook: wx.Notebook
     _controller_panel: ControllerPanel
     _detector_panel: DetectorPanel
-    _calibrator_panel: CalibratorPanel
+    _intrinsics_panel: IntrinsicsPanel
+    _extrinsics_panel: ExtrinsicsPanel
    _board_builder_panel: BoardBuilderPanel
    _pose_solver_panel:
PoseSolverPanel @@ -74,13 +77,22 @@ def __init__( text=DETECTOR_LABEL, select=False) - self._calibrator_panel = CalibratorPanel( + self._intrinsics_panel = IntrinsicsPanel( parent=self._notebook, controller=self._controller, status_message_source=self._status_message_source) self._notebook.AddPage( - page=self._calibrator_panel, - text=CALIBRATOR_LABEL, + page=self._intrinsics_panel, + text=INTRINSIC_CALIBRATOR_LABEL, + select=False) + + self._extrinsics_panel = ExtrinsicsPanel( + parent=self._notebook, + controller=self._controller, + status_message_source=self._status_message_source) + self._notebook.AddPage( + page=self._extrinsics_panel, + text=EXTRINSIC_CALIBRATOR_LABEL, select=False) self._board_builder_panel = BoardBuilderPanel( @@ -116,7 +128,8 @@ def on_page_changed(self, event: wx.BookCtrlEvent): pages: list[BasePanel] = [ self._controller_panel, self._detector_panel, - self._calibrator_panel, + self._intrinsics_panel, + self._extrinsics_panel, self._board_builder_panel, self._pose_solver_panel] for page in pages: diff --git a/src/gui/panels/__init__.py b/src/gui/panels/__init__.py index 8979663..0b40839 100644 --- a/src/gui/panels/__init__.py +++ b/src/gui/panels/__init__.py @@ -1,6 +1,7 @@ from .base_panel import BasePanel from .board_builder_panel import BoardBuilderPanel -from .calibrator_panel import CalibratorPanel +from .intrinsics_panel import IntrinsicsPanel +from .extrinsics_panel import ExtrinsicsPanel from .controller_panel import ControllerPanel from .detector_panel import DetectorPanel from .pose_solver_panel import PoseSolverPanel diff --git a/src/gui/panels/extrinsics_panel.py b/src/gui/panels/extrinsics_panel.py new file mode 100644 index 0000000..503c007 --- /dev/null +++ b/src/gui/panels/extrinsics_panel.py @@ -0,0 +1,662 @@ +from .base_panel import \ + BasePanel +from .feedback import \ + ImagePanel +from .parameters import \ + ParameterSelector, \ + ParameterText +from .specialized import \ + CalibrationImageTable, \ + CalibrationResultTable +from src.common import \ + ErrorResponse, \ + EmptyResponse, \ + ExtrinsicCalibrator, \ + ImageFormat, \ + ImageUtils, \ + IntrinsicCalibrator, \ + MCTRequestSeries, \ + MCTResponse, \ + MCTResponseSeries, \ + StatusMessageSource +from src.controller import \ + MCTController +from src.detector import \ + CameraImageGetRequest, \ + CameraImageGetResponse, \ + IntrinsicCalibrationResultGetActiveRequest, \ + IntrinsicCalibrationResultGetActiveResponse +from src.mixer import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ + MixerUpdateIntrinsicParametersRequest +import datetime +from io import BytesIO +import logging +import numpy +from typing import Optional +import uuid +import wx +import wx.grid + + +logger = logging.getLogger(__name__) + + +class ExtrinsicsPanel(BasePanel): + + _controller: MCTController + + _mixer_selector: ParameterSelector + _reload_button: wx.Button + 
_preview_toggle_button: wx.ToggleButton + _capture_button: wx.Button + _image_table: CalibrationImageTable + _image_label_textbox: ParameterText + _image_state_selector: ParameterSelector + _image_update_button: wx.Button + _calibrate_button: wx.Button + _calibrate_status_textbox: wx.TextCtrl + _result_table: CalibrationResultTable + _result_display_textbox: wx.TextCtrl + _result_label_textbox: ParameterText + _result_state_selector: ParameterSelector + _result_update_button: wx.Button + _image_panel: ImagePanel + + _control_blocking_request_ids: set[uuid.UUID] + _is_updating: bool # Some things should only trigger during explicit user events + _preview_request_ids: set[uuid.UUID] + _preview_images_by_detector_label: dict[str, numpy.ndarray] + _extrinsic_image: numpy.ndarray | None + _current_capture_timestamp: datetime.datetime | None # None indicates no capture in progress + _calibration_in_progress: bool + _image_metadata_list: list[IntrinsicCalibrator.ImageMetadata] + _result_metadata_list: list[IntrinsicCalibrator.ResultMetadata] + + def __init__( + self, + parent: wx.Window, + controller: MCTController, + status_message_source: StatusMessageSource, + name: str = "IntrinsicsPanel" + ): + super().__init__( + parent=parent, + status_message_source=status_message_source, + name=name) + self._controller = controller + + self._control_blocking_request_ids = set() + self._is_updating = False + self._preview_request_ids = set() + self._preview_images_by_detector_label = dict() + self._extrinsic_image = None + self._current_capture_timestamp = None + self._calibration_in_progress = False + self._image_metadata_list = list() + self._result_metadata_list = list() + + horizontal_split_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.HORIZONTAL) + + control_border_panel: wx.Panel = wx.Panel(parent=self) + control_border_box: wx.StaticBoxSizer = wx.StaticBoxSizer( + orient=wx.VERTICAL, + parent=control_border_panel) + control_panel: wx.ScrolledWindow = wx.ScrolledWindow( + parent=control_border_panel) + control_panel.SetScrollRate( + xstep=1, + ystep=1) + control_panel.ShowScrollbars( + horz=wx.SHOW_SB_NEVER, + vert=wx.SHOW_SB_ALWAYS) + + control_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.VERTICAL) + + self._mixer_selector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Mixer", + selectable_values=list()) + + self._reload_button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Reload Metadata") + + self._preview_toggle_button = wx.ToggleButton( + parent=control_panel, + label="Preview") + control_sizer.Add( + window=self._preview_toggle_button, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._capture_button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Capture") + + self._calibrate_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Calibrate") + + self._calibrate_status_textbox = wx.TextCtrl( + parent=control_panel, + style=wx.TE_READONLY | wx.TE_RICH) + self._calibrate_status_textbox.SetEditable(False) + self._calibrate_status_textbox.SetBackgroundColour(colour=wx.Colour(red=249, green=249, blue=249, alpha=255)) + control_sizer.Add( + window=self._calibrate_status_textbox, + flags=wx.SizerFlags(0).Expand()) + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + self._image_table = CalibrationImageTable(parent=control_panel) + 
self._image_table.SetMaxSize((-1, self._image_table.GetSize().GetHeight())) + control_sizer.Add( + window=self._image_table, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._image_label_textbox: ParameterText = self.add_control_text_input( + parent=control_panel, + sizer=control_sizer, + label="Image Label") + + self._image_state_selector: ParameterSelector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Image State", + selectable_values=[state.name for state in IntrinsicCalibrator.ImageState]) + + self._image_update_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Update Image") + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + self._result_table = CalibrationResultTable(parent=control_panel) + control_sizer.Add( + window=self._result_table, + flags=wx.SizerFlags(0).Expand()) + control_sizer.AddSpacer(size=BasePanel.DEFAULT_SPACING_PX_VERTICAL) + + self._result_display_textbox = wx.TextCtrl( + parent=control_panel, + style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH) + self._result_display_textbox.SetEditable(False) + self._result_display_textbox.SetBackgroundColour(colour=wx.Colour(red=249, green=249, blue=249, alpha=255)) + control_sizer.Add( + window=self._result_display_textbox, + flags=wx.SizerFlags(1).Align(wx.EXPAND)) + + self._result_label_textbox: ParameterText = self.add_control_text_input( + parent=control_panel, + sizer=control_sizer, + label="Result Label") + + self._result_state_selector: ParameterSelector = self.add_control_selector( + parent=control_panel, + sizer=control_sizer, + label="Result State", + selectable_values=[state.name for state in IntrinsicCalibrator.ResultState]) + + self._result_update_button: wx.Button = self.add_control_button( + parent=control_panel, + sizer=control_sizer, + label="Update Result") + + self.add_horizontal_line_to_spacer( + parent=control_panel, + sizer=control_sizer) + + control_spacer_sizer: wx.BoxSizer = wx.BoxSizer(orient=wx.HORIZONTAL) + control_sizer.Add( + sizer=control_spacer_sizer, + flags=wx.SizerFlags(1).Expand()) + + control_panel.SetSizerAndFit(sizer=control_sizer) + control_border_box.Add( + window=control_panel, + flags=wx.SizerFlags(1).Expand()) + control_border_panel.SetSizer(sizer=control_border_box) + horizontal_split_sizer.Add( + window=control_border_panel, + flags=wx.SizerFlags(50).Expand()) + + self._image_panel = ImagePanel(parent=self) + self._image_panel.SetBackgroundColour(colour=wx.BLACK) + horizontal_split_sizer.Add( + window=self._image_panel, + flags=wx.SizerFlags(50).Expand()) + + self.SetSizerAndFit(sizer=horizontal_split_sizer) + + self._mixer_selector.selector.Bind( + event=wx.EVT_CHOICE, + handler=self._on_mixer_reload) + self._reload_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_mixer_reload) + self._preview_toggle_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_preview_toggled) + self._capture_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_capture_pressed) + self._calibrate_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_calibrate_pressed) + self._image_table.table.Bind( + event=wx.grid.EVT_GRID_SELECT_CELL, + handler=self._on_image_metadata_selected) + self._image_update_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_image_update_pressed) + self._result_table.table.Bind( + event=wx.grid.EVT_GRID_SELECT_CELL, + handler=self._on_result_metadata_selected) + 
self._result_update_button.Bind( + event=wx.EVT_BUTTON, + handler=self._on_result_update_pressed) + + def handle_error_response( + self, + response: ErrorResponse + ): + super().handle_error_response(response=response) + if self._calibration_in_progress: + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=127, green=0, blue=0, alpha=255)) + self._calibrate_status_textbox.SetValue(f"Error: {response.message}") + + def handle_response_series( + self, + response_series: MCTResponseSeries, + task_description: Optional[str] = None, + expected_response_count: Optional[int] = None + ) -> None: + response: MCTResponse + for response in response_series.series: + if isinstance(response, CameraImageGetResponse): + self._handle_response_camera_image_get(response=response, detector_label=response_series.responder) + elif isinstance(response, ExtrinsicCalibrationCalculateResponse): + self._handle_response_extrinsic_calibration_calculate(response=response) + elif isinstance(response, ExtrinsicCalibrationImageAddResponse): + self._handle_response_extrinsic_calibration_image_add(response=response) + elif isinstance(response, ExtrinsicCalibrationImageGetResponse): + self._handle_response_extrinsic_calibration_image_get(response=response) + elif isinstance(response, ExtrinsicCalibrationResultGetResponse): + self._handle_response_extrinsic_calibration_result_get(response=response) + elif isinstance(response, ExtrinsicCalibrationImageMetadataListResponse): + self._handle_response_extrinsic_calibration_image_metadata_list(response=response) + elif isinstance(response, ExtrinsicCalibrationResultMetadataListResponse): + self._handle_response_extrinsic_calibration_result_metadata_list(response=response) + elif isinstance(response, IntrinsicCalibrationResultGetActiveResponse): + self._handle_response_intrinsic_calibration_result_get_active( + response=response, + detector_label=response_series.responder) + elif isinstance(response, ErrorResponse): + self.handle_error_response(response=response) + elif not isinstance(response, EmptyResponse): + self.handle_unknown_response(response=response) + + def on_page_select(self) -> None: + super().on_page_select() + self._update_ui_controls() + + def update_loop(self) -> None: + super().update_loop() + self._is_updating = True + + response_series: MCTResponseSeries | None + for request_id in self._control_blocking_request_ids: + _, response_series = self._controller.response_series_pop(request_series_id=request_id) + if response_series is not None: + self._control_blocking_request_ids.remove(request_id) + self.handle_response_series(response_series) + self._update_ui_controls() + + if self._preview_toggle_button.GetValue(): + for request_id in self._preview_request_ids: + _, response_series = self._controller.response_series_pop(request_series_id=request_id) + if response_series is not None and \ + len(response_series.series) > 0 and \ + isinstance(response_series.series[0], CameraImageGetResponse): + response: CameraImageGetResponse = response_series.series[0] + detector_label: str = response_series.responder + self._preview_images_by_detector_label[detector_label] = \ + ImageUtils.base64_to_image(response.image_base64) + + self._update_ui_image() + + self._is_updating = False + + def _handle_response_camera_image_get( + self, + response: CameraImageGetResponse, + detector_label: str + ) -> None: + # Note: This is for the control-blocking requests ONLY! 
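+        # Each image captured via CameraImageGetRequest is forwarded to the selected mixer as an
+        # ExtrinsicCalibrationImageAddRequest, tagged with the shared _current_capture_timestamp so
+        # that frames from different detectors are grouped into one synchronized calibration sample;
+        # the image metadata list is then re-requested to refresh the table.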
+ mixer_label: str = self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageAddRequest( + image_base64=response.image_base64, + detector_label=detector_label, + timestamp_utc_iso8601=self._current_capture_timestamp.isoformat()), + ExtrinsicCalibrationImageMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + + def _handle_response_intrinsic_calibration_result_get_active( + self, + response: IntrinsicCalibrationResultGetActiveResponse, + detector_label: str + ) -> None: + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + MixerUpdateIntrinsicParametersRequest( + detector_label=detector_label, + intrinsic_parameters=response.intrinsic_calibration.calibrated_values)]) + if len(self._control_blocking_request_ids) <= 0: # This is the last intrinsic - we are ready to calculate + request_series.series.append(ExtrinsicCalibrationCalculateRequest()) + request_series.series.append(ExtrinsicCalibrationResultMetadataListRequest()) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + + def _handle_response_extrinsic_calibration_calculate( + self, + response: ExtrinsicCalibrationCalculateResponse + ) -> None: + if not self._calibration_in_progress: + self.status_message_source.enqueue_status_message( + severity="warning", + message=f"Received CalibrateResponse while no calibration is in progress.") + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=0, green=0, blue=127, alpha=255)) + self._calibrate_status_textbox.SetValue(f"Calibration {response.result_identifier} complete.") + self._result_display_textbox.SetValue(response.extrinsic_calibration.model_dump_json(indent=4)) + self._calibration_in_progress = False + + # noinspection PyUnusedLocal + def _handle_response_extrinsic_calibration_image_add( + self, + response: ExtrinsicCalibrationImageAddResponse + ) -> None: + if len(self._control_blocking_request_ids) <= 0: + self._current_capture_timestamp = None + + def _handle_response_extrinsic_calibration_image_get( + self, + response: ExtrinsicCalibrationImageGetResponse + ) -> None: + self._extrinsic_image = ImageUtils.base64_to_image(response.image_base64) + + def _handle_response_extrinsic_calibration_image_metadata_list( + self, + response: ExtrinsicCalibrationImageMetadataListResponse + ) -> None: + self._image_metadata_list = response.metadata_list + self._image_table.update_contents(row_contents=self._image_metadata_list) + + def _handle_response_extrinsic_calibration_result_get( + self, + response: ExtrinsicCalibrationResultGetResponse + ) -> None: + self._result_display_textbox.SetValue(str(response.extrinsic_calibration.model_dump_json(indent=4))) + + def _handle_response_extrinsic_calibration_result_metadata_list( + self, + response: ExtrinsicCalibrationResultMetadataListResponse + ) -> None: + self._result_metadata_list = response.metadata_list + self._result_table.update_contents(row_contents=self._result_metadata_list) + + def _on_mixer_reload(self, _event: wx.CommandEvent) -> None: + self._image_metadata_list = list() + self._result_metadata_list = list() + self._calibrate_status_textbox.SetValue(str()) + self._result_display_textbox.SetValue(str()) + mixer_label: str = 
self._mixer_selector.selector.GetStringSelection() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageMetadataListRequest(), + ExtrinsicCalibrationResultMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_preview_toggled(self, _event: wx.CommandEvent) -> None: + if self._is_updating: + return + self._image_table.set_selected_row_index(None) + self._update_ui_controls() + + def _on_capture_pressed(self, _event: wx.CommandEvent) -> None: + self._current_capture_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + CameraImageGetRequest(format=ImageFormat.FORMAT_PNG)]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_calibrate_pressed(self, _event: wx.CommandEvent) -> None: + self._calibrate_status_textbox.SetForegroundColour(colour=wx.Colour(red=0, green=0, blue=0, alpha=255)) + self._calibrate_status_textbox.SetValue("Calibrating...") + self._result_display_textbox.SetValue(str()) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + request_series: MCTRequestSeries = MCTRequestSeries(series=[IntrinsicCalibrationResultGetActiveRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series)) + self._calibration_in_progress = True + self._update_ui_controls() + + def _on_image_metadata_selected(self, _event: wx.grid.GridEvent) -> None: + if self._is_updating: + return + self._preview_toggle_button.SetValue(False) + image_index: int = self._image_table.get_selected_row_index() + image_identifier: str | None = self._image_metadata_list[image_index].identifier + if image_identifier is not None: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageGetRequest(image_identifier=image_identifier)]) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_image_update_pressed(self, _event: wx.CommandEvent) -> None: + self._calibrate_status_textbox.SetValue(str()) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + image_index: int = self._image_table.get_selected_row_index() + image_identifier: str = self._image_metadata_list[image_index].identifier + image_state: IntrinsicCalibrator.ImageState = \ + ExtrinsicCalibrator.ImageState[self._image_state_selector.selector.GetStringSelection()] + image_label: str = self._image_label_textbox.textbox.GetValue() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationImageMetadataUpdateRequest( + image_identifier=image_identifier, + image_state=image_state, + image_label=image_label), + ExtrinsicCalibrationDeleteStagedRequest(), + ExtrinsicCalibrationImageMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + 
request_series=request_series)) + self._update_ui_controls() + + def _on_result_metadata_selected(self, _event: wx.grid.GridEvent) -> None: + if self._is_updating: + return + self._preview_toggle_button.SetValue(False) + self._result_display_textbox.SetValue(str()) + result_index: int = self._result_table.get_selected_row_index() + result_identifier: str | None = self._result_metadata_list[result_index].identifier + if result_identifier is not None: + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationResultGetRequest(result_identifier=result_identifier)]) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _on_result_update_pressed(self, _event: wx.CommandEvent) -> None: + self._result_display_textbox.SetValue(str()) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + result_index: int = self._result_table.get_selected_row_index() + result_identifier: str = self._result_metadata_list[result_index].identifier + result_state: ExtrinsicCalibrator.ResultState = \ + ExtrinsicCalibrator.ResultState[self._result_state_selector.selector.GetStringSelection()] + result_label: str = self._result_label_textbox.textbox.GetValue() + request_series: MCTRequestSeries = MCTRequestSeries(series=[ + ExtrinsicCalibrationResultMetadataUpdateRequest( + result_identifier=result_identifier, + result_state=result_state, + result_label=result_label), + ExtrinsicCalibrationDeleteStagedRequest(), + ExtrinsicCalibrationResultMetadataListRequest()]) + self._control_blocking_request_ids.add(self._controller.request_series_push( + connection_label=mixer_label, + request_series=request_series)) + self._update_ui_controls() + + def _update_ui_controls(self) -> None: + self._mixer_selector.Enable(False) + self._reload_button.Enable(False) + self._preview_toggle_button.Enable(False) + self._capture_button.Enable(False) + self._calibrate_button.Enable(False) + self._image_table.Enable(False) + self._image_label_textbox.Enable(False) + self._image_label_textbox.textbox.SetValue(str()) + self._image_state_selector.Enable(False) + self._image_state_selector.selector.SetStringSelection(str()) + self._image_update_button.Enable(False) + self._calibrate_status_textbox.Enable(False) + self._result_table.Enable(False) + self._result_display_textbox.Enable(False) + self._result_label_textbox.Enable(False) + self._result_label_textbox.textbox.SetValue(str()) + self._result_state_selector.Enable(False) + self._result_state_selector.selector.SetStringSelection(str()) + self._result_update_button.Enable(False) + if len(self._control_blocking_request_ids) > 0: + return # We're waiting for something + self._mixer_selector.Enable(True) + mixer_label: str = self._mixer_selector.selector.GetStringSelection() + if len(mixer_label) <= 0: + self._preview_toggle_button.SetValue(False) + return + self._reload_button.Enable(True) + self._preview_toggle_button.Enable(True) + self._capture_button.Enable(True) + # == NO RETURN GUARDS AFTER THIS POINT == + if len(self._image_metadata_list) > 0: + self._image_table.Enable(True) + image_index: int | None = self._image_table.get_selected_row_index() + if image_index is not None: + if image_index >= len(self._image_metadata_list): + self.status_message_source.enqueue_status_message( + severity="warning", + message=f"Selected image index {image_index} is 
out of bounds. Setting to None.")
+                    self._image_table.set_selected_row_index(None)
+                else:
+                    image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index]
+                    self._image_label_textbox.Enable(True)
+                    self._image_label_textbox.textbox.SetValue(image_metadata.label)
+                    self._image_state_selector.Enable(True)
+                    self._image_state_selector.selector.SetStringSelection(image_metadata.state.name)
+                    self._image_update_button.Enable(True)
+        calibration_image_count: int = 0
+        for image_metadata in self._image_metadata_list:
+            if image_metadata.state == IntrinsicCalibrator.ImageState.SELECT:
+                calibration_image_count += 1
+        if calibration_image_count > 0:
+            self._calibrate_button.Enable(True)
+            self._calibrate_status_textbox.Enable(True)
+        if len(self._result_metadata_list) > 0:
+            self._result_table.Enable(True)
+            result_index: int | None = self._result_table.get_selected_row_index()
+            if result_index is not None:
+                if result_index >= len(self._result_metadata_list):
+                    self.status_message_source.enqueue_status_message(
+                        severity="warning",
+                        message=f"Selected result index {result_index} is out of bounds. Setting to None.")
+                    self._result_table.set_selected_row_index(None)
+                else:
+                    result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index]
+                    self._result_display_textbox.Enable(True)
+                    self._result_label_textbox.Enable(True)
+                    self._result_label_textbox.textbox.SetValue(result_metadata.label)
+                    self._result_state_selector.Enable(True)
+                    self._result_state_selector.selector.SetStringSelection(result_metadata.state.name)
+                    self._result_update_button.Enable(True)
+        self.Layout()
+        self.Refresh()
+        self.Update()
+
+    def _update_ui_image(self):
+        display_image: numpy.ndarray = ImageUtils.black_image(resolution_px=self._image_panel.GetSize())
+        available_size_px: tuple[int, int] = (display_image.shape[1], display_image.shape[0])
+        if self._preview_toggle_button.GetValue():
+            detector_labels: list[str] = self._controller.get_active_detector_labels()
+            image_dimensions: tuple[int, int]
+            image_positions: list[tuple[int, int]]
+            image_dimensions, image_positions = ImageUtils.partition_rect(
+                available_size_px=available_size_px,
+                partition_count=len(detector_labels))
+            for detector_index, detector_label in enumerate(detector_labels):
+                if detector_label in self._preview_images_by_detector_label:
+                    detector_image: numpy.ndarray = self._preview_images_by_detector_label[detector_label]
+                    detector_image = ImageUtils.image_resize_to_fit(
+                        opencv_image=detector_image,
+                        available_size=image_dimensions)
+                    x_px, y_px = image_positions[detector_index]
+                    display_image[
+                        y_px:y_px + detector_image.shape[0],
+                        x_px:x_px + detector_image.shape[1]
+                    ] = detector_image
+        elif self._extrinsic_image is not None:
+            extrinsic_image: numpy.ndarray = ImageUtils.image_resize_to_fit(
+                opencv_image=self._extrinsic_image,
+                available_size=available_size_px)
+            offset_x_px: int = (display_image.shape[1] - extrinsic_image.shape[1]) // 2
+            offset_y_px: int = (display_image.shape[0] - extrinsic_image.shape[0]) // 2
+            display_image[
+                offset_y_px:offset_y_px + extrinsic_image.shape[0],
+                offset_x_px:offset_x_px + extrinsic_image.shape[1],
+            ] = extrinsic_image
+
+        image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg")
+        image_buffer_io: BytesIO = BytesIO(image_buffer)
+        wx_image: wx.Image = wx.Image(image_buffer_io)
+        wx_bitmap: wx.Bitmap =
wx_image.ConvertToBitmap() + self._image_panel.set_bitmap(wx_bitmap) + self._image_panel.paint() diff --git a/src/gui/panels/calibrator_panel.py b/src/gui/panels/intrinsics_panel.py similarity index 99% rename from src/gui/panels/calibrator_panel.py rename to src/gui/panels/intrinsics_panel.py index 5aa16be..f6c0be9 100644 --- a/src/gui/panels/calibrator_panel.py +++ b/src/gui/panels/intrinsics_panel.py @@ -47,7 +47,7 @@ logger = logging.getLogger(__name__) -class CalibratorPanel(BasePanel): +class IntrinsicsPanel(BasePanel): _controller: MCTController @@ -80,7 +80,7 @@ def __init__( parent: wx.Window, controller: MCTController, status_message_source: StatusMessageSource, - name: str = "CalibratorPanel" + name: str = "IntrinsicsPanel" ): super().__init__( parent=parent, @@ -561,7 +561,7 @@ def _update_ui_controls(self) -> None: else: image_metadata: IntrinsicCalibrator.ImageMetadata = self._image_metadata_list[image_index] self._image_label_textbox.Enable(True) - self._image_label_textbox.textbox.SetValue(image_metadata.image_label) + self._image_label_textbox.textbox.SetValue(image_metadata.label) self._image_state_selector.Enable(True) self._image_state_selector.selector.SetStringSelection(image_metadata.state.name) self._image_update_button.Enable(True) @@ -590,7 +590,7 @@ def _update_ui_controls(self) -> None: result_metadata: IntrinsicCalibrator.ResultMetadata = self._result_metadata_list[result_index] self._result_display_textbox.Enable(True) self._result_label_textbox.Enable(True) - self._result_label_textbox.textbox.SetValue(result_metadata.result_label) + self._result_label_textbox.textbox.SetValue(result_metadata.label) self._result_state_selector.Enable(True) self._result_state_selector.selector.SetStringSelection(result_metadata.state.name) self._result_update_button.Enable(True) diff --git a/src/gui/panels/specialized/calibration_image_table.py b/src/gui/panels/specialized/calibration_image_table.py index b6f46ec..3e250e8 100644 --- a/src/gui/panels/specialized/calibration_image_table.py +++ b/src/gui/panels/specialized/calibration_image_table.py @@ -35,7 +35,7 @@ def _set_row_contents( self.table.SetCellValue( row=row_index, col=_COL_IDX_LABEL, - s=row_content.image_label) + s=row_content.label) self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, diff --git a/src/gui/panels/specialized/calibration_result_table.py b/src/gui/panels/specialized/calibration_result_table.py index bf2ebc4..d7e69e9 100644 --- a/src/gui/panels/specialized/calibration_result_table.py +++ b/src/gui/panels/specialized/calibration_result_table.py @@ -35,7 +35,7 @@ def _set_row_contents( self.table.SetCellValue( row=row_index, col=_COL_IDX_LABEL, - s=row_content.result_label) + s=row_content.label) self.table.SetCellValue( row=row_index, col=_COL_IDX_TIMESTAMP, diff --git a/src/mixer/__init__.py b/src/mixer/__init__.py index 7523cbd..687e348 100644 --- a/src/mixer/__init__.py +++ b/src/mixer/__init__.py @@ -1,13 +1,30 @@ +from.mixer import Mixer from .api import \ + ExtrinsicCalibrationCalculateRequest, \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationDeleteStagedRequest, \ + ExtrinsicCalibrationImageAddRequest, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetRequest, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListRequest, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationImageMetadataUpdateRequest, \ + ExtrinsicCalibrationResultGetActiveRequest, \ + 
ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultGetRequest, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultMetadataListRequest, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + ExtrinsicCalibrationResultMetadataUpdateRequest, \ PoseSolverAddDetectorFrameRequest, \ PoseSolverAddTargetRequest, \ PoseSolverAddTargetResponse, \ PoseSolverGetPosesRequest, \ PoseSolverGetPosesResponse, \ PoseSolverSetExtrinsicRequest, \ - MixerUpdateIntrinsicParametersRequest, \ PoseSolverSetReferenceRequest, \ PoseSolverSetTargetsRequest, \ MixerStartRequest, \ - MixerStopRequest -from.mixer import Mixer + MixerStopRequest, \ + MixerUpdateIntrinsicParametersRequest From 33c13aacf2ae8a48c81df7a3daa4825e05803f8d Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 27 Aug 2025 17:38:13 -0400 Subject: [PATCH 24/33] MNT: Update configuration file locations, terminologies, initialization... --- .../controller_calibration_config.json | 2 +- .../controller_detector_lone_local.json | 2 +- .../measure_detector_to_reference_config.json | 2 +- ..._measure_detector_to_reference_config.json | 2 +- .../controller}/slicer_test_config_1.json | 2 +- data/configuration/detector_config.json | 19 +++++++ data/configuration/mixer_config.json | 9 ++++ data/detector_config.json | 12 ----- data/pose_solver_config.json | 3 -- src/common/annotator.py | 5 +- src/common/calibration.py | 4 +- src/common/camera.py | 7 +-- src/controller/configuration.py | 2 +- src/controller/mct_controller.py | 2 +- src/detector/app.py | 17 +++--- src/detector/detector.py | 29 +++++++--- .../camera_opencv_capture_device.py | 9 +++- src/implementations/camera_picamera2.py | 2 +- src/main_detector.py | 1 - src/{main_pose_solver.py => main_mixer.py} | 3 +- src/mixer/app.py | 53 ++++++++++--------- src/mixer/mixer.py | 19 ++++--- 22 files changed, 124 insertions(+), 82 deletions(-) rename data/{ => configuration/controller}/controller_calibration_config.json (99%) rename data/{ => configuration/controller}/controller_detector_lone_local.json (87%) rename data/{ => configuration/controller}/measure_detector_to_reference_config.json (99%) rename data/{ => configuration/controller}/output_measure_detector_to_reference_config.json (99%) rename data/{ => configuration/controller}/slicer_test_config_1.json (99%) create mode 100644 data/configuration/detector_config.json create mode 100644 data/configuration/mixer_config.json delete mode 100644 data/detector_config.json delete mode 100644 data/pose_solver_config.json rename src/{main_pose_solver.py => main_mixer.py} (78%) diff --git a/data/controller_calibration_config.json b/data/configuration/controller/controller_calibration_config.json similarity index 99% rename from data/controller_calibration_config.json rename to data/configuration/controller/controller_calibration_config.json index c6c5b43..a20d4f7 100644 --- a/data/controller_calibration_config.json +++ b/data/configuration/controller/controller_calibration_config.json @@ -7,7 +7,7 @@ "port": 8001 } ], - "pose_solvers": [ + "mixers": [ { "label": "sol", "ip_address": "127.0.0.1", diff --git a/data/controller_detector_lone_local.json b/data/configuration/controller/controller_detector_lone_local.json similarity index 87% rename from data/controller_detector_lone_local.json rename to data/configuration/controller/controller_detector_lone_local.json index 0be740a..c013307 100644 --- a/data/controller_detector_lone_local.json +++ b/data/configuration/controller/controller_detector_lone_local.json @@ 
-7,5 +7,5 @@ "port": 8001 } ], - "pose_solvers": [] + "mixers": [] } diff --git a/data/measure_detector_to_reference_config.json b/data/configuration/controller/measure_detector_to_reference_config.json similarity index 99% rename from data/measure_detector_to_reference_config.json rename to data/configuration/controller/measure_detector_to_reference_config.json index 12b653b..1c8f817 100644 --- a/data/measure_detector_to_reference_config.json +++ b/data/configuration/controller/measure_detector_to_reference_config.json @@ -26,7 +26,7 @@ ] } ], - "pose_solvers": [ + "mixers": [ { "label": "ps", "ip_address": "127.0.0.1", diff --git a/data/output_measure_detector_to_reference_config.json b/data/configuration/controller/output_measure_detector_to_reference_config.json similarity index 99% rename from data/output_measure_detector_to_reference_config.json rename to data/configuration/controller/output_measure_detector_to_reference_config.json index 962a98d..56ef648 100644 --- a/data/output_measure_detector_to_reference_config.json +++ b/data/configuration/controller/output_measure_detector_to_reference_config.json @@ -62,7 +62,7 @@ ] } ], - "pose_solvers": [ + "mixers": [ { "label": "ps", "ip_address": "127.0.0.1", diff --git a/data/slicer_test_config_1.json b/data/configuration/controller/slicer_test_config_1.json similarity index 99% rename from data/slicer_test_config_1.json rename to data/configuration/controller/slicer_test_config_1.json index f3971d6..07aef26 100644 --- a/data/slicer_test_config_1.json +++ b/data/configuration/controller/slicer_test_config_1.json @@ -125,7 +125,7 @@ } } ], - "pose_solvers": [ + "mixers": [ { "label": "sol", "ip_address": "127.0.0.1", diff --git a/data/configuration/detector_config.json b/data/configuration/detector_config.json new file mode 100644 index 0000000..5bf4c83 --- /dev/null +++ b/data/configuration/detector_config.json @@ -0,0 +1,19 @@ +{ + "detector_label": "detector_01", + "intrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/intrinsic/" + } + }, + "camera": { + "implementation": "opencv_capture_device", + "configuration": { + "capture_device": 0 + } + }, + "annotator": { + "implementation": "aruco_opencv", + "configuration": {} + } +} diff --git a/data/configuration/mixer_config.json b/data/configuration/mixer_config.json new file mode 100644 index 0000000..dfae5df --- /dev/null +++ b/data/configuration/mixer_config.json @@ -0,0 +1,9 @@ +{ + "mixer_label": "mixer_01", + "extrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/intrinsic/" + } + } +} diff --git a/data/detector_config.json b/data/detector_config.json deleted file mode 100644 index 5af146b..0000000 --- a/data/detector_config.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "calibrator_configuration": { - "data_path": "./example_data_path/" - }, - "camera_configuration": { - "driver": "picamera2", - "capture_device": "0" - }, - "marker_configuration": { - "method": "aruco_opencv" - } -} diff --git a/data/pose_solver_config.json b/data/pose_solver_config.json deleted file mode 100644 index 6714195..0000000 --- a/data/pose_solver_config.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "serial_identifier": "example_pose_solver" -} diff --git a/src/common/annotator.py b/src/common/annotator.py index 4b10a26..54ddfbd 100644 --- a/src/common/annotator.py +++ b/src/common/annotator.py @@ -11,11 +11,10 @@ import datetime from enum import StrEnum import numpy -from pydantic import 
BaseModel, Field +from pydantic import BaseModel -class _Configuration(BaseModel): - method: str = Field() +class _Configuration(BaseModel): pass class _Status(StrEnum): diff --git a/src/common/calibration.py b/src/common/calibration.py index 0a04503..a1ede2b 100644 --- a/src/common/calibration.py +++ b/src/common/calibration.py @@ -476,10 +476,8 @@ class IntrinsicCalibrator(AbstractCalibrator, abc.ABC): def __init__( self, - configuration: Configuration | dict[str, ...], + configuration: Configuration, ): - if isinstance(configuration, dict): - configuration = IntrinsicCalibrator.Configuration(**configuration) super().__init__(configuration=configuration) # noinspection DuplicatedCode diff --git a/src/common/camera.py b/src/common/camera.py index 8481697..d796301 100644 --- a/src/common/camera.py +++ b/src/common/camera.py @@ -14,13 +14,10 @@ import datetime from enum import StrEnum import numpy -from pydantic import BaseModel, Field -from typing import Union +from pydantic import BaseModel -class _Configuration(BaseModel): - driver: str = Field() - capture_device: Union[str, int] = Field() # Not used by all drivers (notably it IS used by OpenCV) +class _Configuration(BaseModel): pass class _Status(StrEnum): diff --git a/src/controller/configuration.py b/src/controller/configuration.py index 4c5bbbd..5d86d32 100644 --- a/src/controller/configuration.py +++ b/src/controller/configuration.py @@ -31,4 +31,4 @@ class PoseSolverConfig(MCTComponentConfig): class MCTConfiguration(BaseModel): startup_mode: StartupMode = Field() detectors: list[DetectorComponentConfig] = Field(default_factory=list) - pose_solvers: list[PoseSolverConfig] = Field(default_factory=list) + mixers: list[PoseSolverConfig] = Field(default_factory=list) diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 23f61fc..60fca77 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -146,7 +146,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: detector_connection.configured_camera_parameters = detector.camera_parameters if detector.marker_parameters is not None: detector_connection.configured_marker_parameters = detector.marker_parameters - for pose_solver in configuration.pose_solvers: + for pose_solver in configuration.mixers: if not is_valid_ip_address(pose_solver): continue component_address: Connection.ComponentAddress = Connection.ComponentAddress( diff --git a/src/detector/app.py b/src/detector/app.py index c36ce9d..b6a9d06 100644 --- a/src/detector/app.py +++ b/src/detector/app.py @@ -11,11 +11,12 @@ from .detector import \ Detector from src.common import \ - Camera, \ Annotator, \ + Camera, \ EmptyResponse, \ ErrorResponse, \ ImageFormat, \ + IntrinsicCalibrator, \ TimestampGetRequest, \ TimestampGetResponse, \ TimeSyncStartRequest, \ @@ -35,7 +36,7 @@ def create_app() -> FastAPI: detector_configuration_filepath: str = \ - os.path.join(os.path.dirname(__file__), "..", "..", "data", "detector_config.json") + os.path.join(os.path.dirname(__file__), "..", "..", "data", "configuration", "detector_config.json") detector_configuration: Detector.Configuration with open(detector_configuration_filepath, 'r') as infile: detector_configuration_file_contents: str = infile.read() @@ -46,26 +47,30 @@ def create_app() -> FastAPI: # and allow end-users to register custom classes that are not necessarily shipped within this library. 
camera_type: type[Camera] - if detector_configuration.camera_configuration.driver == "opencv_capture_device": + if detector_configuration.camera.implementation == "opencv_capture_device": from src.implementations.camera_opencv_capture_device import OpenCVCaptureDeviceCamera camera_type = OpenCVCaptureDeviceCamera - elif detector_configuration.camera_configuration.driver == "picamera2": + elif detector_configuration.camera.implementation == "picamera2": from src.implementations.camera_picamera2 import Picamera2Camera camera_type = Picamera2Camera else: raise RuntimeError(f"Unsupported camera driver {detector_configuration.camera_configuration.driver}.") marker_type: type[Annotator] - if detector_configuration.annotator_configuration.method == "aruco_opencv": + if detector_configuration.annotator.implementation == "aruco_opencv": from src.implementations.annotator_aruco_opencv import ArucoOpenCVAnnotator marker_type = ArucoOpenCVAnnotator else: raise RuntimeError(f"Unsupported marker method {detector_configuration.annotator_configuration.method}.") + from src.implementations.intrinsic_charuco_opencv import CharucoOpenCVIntrinsicCalibrator + intrinsic_calibrator_type: type[IntrinsicCalibrator] = CharucoOpenCVIntrinsicCalibrator + detector = Detector( detector_configuration=detector_configuration, camera_type=camera_type, - annotator_type=marker_type) + annotator_type=marker_type, + intrinsic_calibrator_type=intrinsic_calibrator_type) detector_app = FastAPI() # CORS Middleware diff --git a/src/detector/detector.py b/src/detector/detector.py index f40d089..acfd9bd 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -62,6 +62,11 @@ _ROLE_LABEL: Final[str] = "detector" +class _ConfigurationSection(BaseModel): + implementation: str = Field() + configuration: dict = Field() + + # noinspection DuplicatedCode class Detector(MCTComponent): @@ -69,9 +74,10 @@ class Configuration(BaseModel): """ Top-level schema for Detector initialization data """ - calibrator_configuration: IntrinsicCalibrator.Configuration = Field() - camera_configuration: Camera.Configuration = Field() - annotator_configuration: Annotator.Configuration = Field() + detector_label: str = Field() + intrinsic_calibrator: _ConfigurationSection = Field() + camera: _ConfigurationSection = Field() + annotator: _ConfigurationSection = Field() _configuration: Configuration @@ -85,20 +91,27 @@ def __init__( self, detector_configuration: Configuration, camera_type: type[Camera], - annotator_type: type[Annotator] + annotator_type: type[Annotator], + intrinsic_calibrator_type: type[IntrinsicCalibrator] ): super().__init__( status_source_label="detector", send_status_messages_to_logger=True) self._configuration = detector_configuration - self._calibrator = IntrinsicCalibrator( - configuration=detector_configuration.calibrator_configuration) + # noinspection PyArgumentList + self._calibrator = intrinsic_calibrator_type( + configuration=intrinsic_calibrator_type.Configuration( + **detector_configuration.intrinsic_calibrator.configuration)) + # noinspection PyArgumentList self._camera = camera_type( - configuration=detector_configuration.camera_configuration, + configuration=camera_type.Configuration( + **detector_configuration.camera.configuration), status_message_source=self.get_status_message_source()) + # noinspection PyArgumentList self._annotator = annotator_type( - configuration=detector_configuration.annotator_configuration, + configuration=annotator_type.Configuration( + **detector_configuration.annotator.configuration), 
status_message_source=self.get_status_message_source()) self._frame_count = 0 diff --git a/src/implementations/camera_opencv_capture_device.py b/src/implementations/camera_opencv_capture_device.py index 6974fa2..cc9f1e4 100644 --- a/src/implementations/camera_opencv_capture_device.py +++ b/src/implementations/camera_opencv_capture_device.py @@ -20,6 +20,7 @@ import logging import numpy import os +from pydantic import Field from typing import Final @@ -66,8 +67,14 @@ _CAMERA_GAMMA_RANGE_MAXIMUM: Final[int] = 300 +class _Configuration(Camera.Configuration): + capture_device: int = Field()  # Index of the capture device to open via OpenCV (e.g. 0 for the default camera) + + class OpenCVCaptureDeviceCamera(Camera): + Configuration: type[_Configuration] = _Configuration + _capture: cv2.VideoCapture | None _capture_device_id: str | int @@ -83,7 +90,7 @@ def __init__( configuration=configuration, status_message_source=status_message_source) self._image = None - self._image_timestamp_utc = datetime.datetime.min + self._image_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._capture = None self._capture_device_id = configuration.capture_device self.set_status(Camera.Status.STOPPED) diff --git a/src/implementations/camera_picamera2.py b/src/implementations/camera_picamera2.py index 2f8c8c7..220b167 100644 --- a/src/implementations/camera_picamera2.py +++ b/src/implementations/camera_picamera2.py @@ -90,7 +90,7 @@ def __init__( configuration=configuration, status_message_source=status_message_source) self._image = None - self._image_timestamp_utc = datetime.datetime.min + self._image_timestamp_utc = datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) self._camera = Picamera2() self._camera_configuration = self._camera.create_video_configuration() self.set_status(Camera.Status.STOPPED) diff --git a/src/main_detector.py b/src/main_detector.py index 498e7bc..e912c35 100644 --- a/src/main_detector.py +++ b/src/main_detector.py @@ -8,7 +8,6 @@ def main(): app, reload=False, port=8001, - host="0.0.0.0", log_level=logging.INFO) diff --git a/src/main_pose_solver.py b/src/main_mixer.py similarity index 78% rename from src/main_pose_solver.py rename to src/main_mixer.py index 332a9b4..de55900 100644 --- a/src/main_pose_solver.py +++ b/src/main_mixer.py @@ -1,10 +1,11 @@ +from src.mixer.app import app import logging import uvicorn def main(): uvicorn.run( - "src.pose_solver.pose_solver_app:app", + app, reload=False, port=8000, log_level=logging.INFO) diff --git a/src/mixer/app.py b/src/mixer/app.py index 7cb8136..01e45be 100644 --- a/src/mixer/app.py +++ b/src/mixer/app.py @@ -7,8 +7,7 @@ Mixer from src.common import \ EmptyResponse, \ - ErrorResponse, \ - PoseSolver + ErrorResponse import asyncio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware @@ -18,72 +17,76 @@ import os +# Note: This is the only implementation, currently.
+from src.implementations.extrinsic_charuco_opencv import CharucoOpenCVExtrinsicCalibrator + + logger = logging.getLogger(__name__) -pose_solver = PoseSolver() def create_app() -> FastAPI: configuration_filepath: str = os.path.join( - os.path.dirname(__file__), "..", "..", "data", "pose_solver_config.json") + os.path.dirname(__file__), "..", "..", "data", "configuration", "mixer_config.json") configuration: Mixer.Configuration with open(configuration_filepath, 'r') as infile: file_contents: str = infile.read() configuration_dict = hjson.loads(file_contents) configuration = Mixer.Configuration(**configuration_dict) - pose_solver_api = Mixer( + + mixer = Mixer( configuration=configuration, - pose_solver=pose_solver) - pose_solver_app = FastAPI() + extrinsic_calibrator_type=CharucoOpenCVExtrinsicCalibrator) + mixer_app = FastAPI() # CORS Middleware origins = ["http://localhost"] - pose_solver_app.add_middleware( + mixer_app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"]) - @pose_solver_app.post("/add_detector_frame") + @mixer_app.post("/add_detector_frame") async def add_marker_corners( request: PoseSolverAddDetectorFrameRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.pose_solver_add_detector_frame(request=request) + return mixer.pose_solver_add_detector_frame(request=request) - @pose_solver_app.post("/add_target") + @mixer_app.post("/add_target") async def add_target_marker( request: PoseSolverAddTargetRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.pose_solver_add_target(request=request) + return mixer.pose_solver_add_target(request=request) - @pose_solver_app.get("/get_poses") + @mixer_app.get("/get_poses") async def get_poses() -> PoseSolverGetPosesResponse | ErrorResponse: - return pose_solver_api.pose_solver_get_poses() + return mixer.pose_solver_get_poses() - @pose_solver_app.post("/set_intrinsic_parameters") + @mixer_app.post("/set_intrinsic_parameters") async def set_intrinsic_parameters( request: MixerUpdateIntrinsicParametersRequest ) -> EmptyResponse | ErrorResponse: - return pose_solver_api.mixer_update_intrinsic_parameters(request=request) + return mixer.mixer_update_intrinsic_parameters(request=request) - @pose_solver_app.head("/start_capture") + @mixer_app.head("/start_capture") async def start_capture() -> None: - pose_solver_api.mixer_start() + mixer.mixer_start() - @pose_solver_app.head("/stop_capture") + @mixer_app.head("/stop_capture") async def stop_capture() -> None: - pose_solver_api.mixer_stop() + mixer.mixer_stop() - @pose_solver_app.websocket("/websocket") + @mixer_app.websocket("/websocket") async def websocket_handler(websocket: WebSocket) -> None: - await pose_solver_api.websocket_handler(websocket=websocket) + await mixer.websocket_handler(websocket=websocket) - @pose_solver_app.on_event("startup") + @mixer_app.on_event("startup") async def internal_update() -> None: - await pose_solver_api.update() + await mixer.update() asyncio.create_task(internal_update()) - return pose_solver_app + return mixer_app app: FastAPI = create_app() diff --git a/src/mixer/mixer.py b/src/mixer/mixer.py index ae44f4e..bc83b58 100644 --- a/src/mixer/mixer.py +++ b/src/mixer/mixer.py @@ -50,11 +50,17 @@ _ROLE_LABEL: Final[str] = "mixer" +class _ConfigurationSection(BaseModel): + implementation: str = Field() + configuration: dict = Field() + + # noinspection DuplicatedCode class Mixer(MCTComponent): class Configuration(BaseModel): - serial_identifier: str = Field() + 
mixer_label: str = Field() + extrinsic_calibrator: _ConfigurationSection = Field() class Status(StrEnum): STOPPED = "stopped" @@ -70,16 +76,17 @@ class Status(StrEnum): def __init__( self, configuration: Configuration, - pose_solver: PoseSolver, - extrinsic_calibrator: ExtrinsicCalibrator + extrinsic_calibrator_type: type[ExtrinsicCalibrator] ): super().__init__( - status_source_label=configuration.serial_identifier, + status_source_label=configuration.mixer_label, send_status_messages_to_logger=True) self._configuration = configuration - self._pose_solver = pose_solver - self._extrinsic_calibrator = extrinsic_calibrator + self._pose_solver = PoseSolver() + self._extrinsic_calibrator = extrinsic_calibrator_type( + configuration=extrinsic_calibrator_type.Configuration( + **self._configuration.extrinsic_calibrator.configuration)) self._status = Mixer.Status.STOPPED From 485228fe75cf62eef29e28d935e17751fc211bf2 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 28 Aug 2025 16:18:13 -0400 Subject: [PATCH 25/33] MNT: Update requirements and setup instructions --- .gitignore | 10 +++++----- doc/setup.md | 8 ++++---- pyproject.toml | 42 ++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 13 ------------- setup/create_image.sh | 4 +--- 5 files changed, 52 insertions(+), 25 deletions(-) create mode 100644 pyproject.toml delete mode 100644 requirements.txt diff --git a/.gitignore b/.gitignore index 7f4ab7c..aa30324 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ -.idea/* -.vscode/* -venv/* +.idea +.venv +.vscode +mcstrack.egg-info +venv *.pyc -.venv/* - diff --git a/doc/setup.md b/doc/setup.md index a871d27..ec07dc1 100644 --- a/doc/setup.md +++ b/doc/setup.md @@ -53,11 +53,11 @@ Please install a recent version of Visual Studio Build Tools: https://visualstud You may need to manually modify the installation and ensure the C++ workload is selected. ``` -py -3.11 -m venv venv +py -3.11 -m venv .venv cd venv/Scripts activate cd ../.. -pip install -r requirements.txt +pip install .[gui,component] ``` ### Linux @@ -70,9 +70,9 @@ You may need to install additional packages depending on your distribution. The - python3.11-dev ``` -py -3.11 -m venv venv +py -3.11 -m venv .venv source venv/bin/activate -pip install -r requirements.txt +pip install .[gui,component] ``` ### Troubleshooting diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..dcb91da --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,42 @@ +[build-system] +requires = ["setuptools>=65.5"] + +[project] +name = "mcstrack" +version = "0.0.0" +authors = [ + { name="Thomas Vaughan", email="thomas.vanghan@cnrc-nrc.gc.ca" }, + { name="Keiran Barr", email="keiranbarr7@gmail.com" }, + { name="David Zhou", email="david.zhou.0110@gmail.com" }, + { name="Andras Lasso", email="lasso@queensu.ca" } +] +description = "Multi camera spatial tracking." 
+readme = "README.md" +requires-python = ">=3.11" +license = { text = "MIT" } +classifiers = [ + "Programming Language :: Python :: 3", + "License :: MIT" +] +dependencies = [ + "fastapi~=0.115", + "hjson~=3.1", + "numpy~=2.3", + "numpy-stl~=3.2", + "opencv-contrib-python~=4.11", + "pydantic~=2.11", + "scipy~=1.15", + "websockets~=15.0" +] + +[project.optional-dependencies] +gui = [ + "PyOpenGL~=3.1.7", + "wxpython~=4.2" +] +component = [ + "uvicorn[standard]~=0.35" +] + +[tool.setuptools] +py-modules = ["mcstrack"] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f9ed91d..0000000 --- a/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -fastapi -hjson -numpy~=1.26 -numpy-stl~=3.1 -opencv-contrib-python~=4.11 -pydantic>=2 -PyOpenGL==3.1.7 -scipy -uvicorn[standard] -websocket -websockets~=13.1 -wxpython - diff --git a/setup/create_image.sh b/setup/create_image.sh index 9b2a906..b27ba2b 100755 --- a/setup/create_image.sh +++ b/setup/create_image.sh @@ -69,9 +69,7 @@ chmod 777 MCSTrack pushd MCSTrack python3 -m venv .venv --system-site-packages source .venv/bin/activate -# Exclude the following packages from this install, because the detector doesn't need it -grep -ivE "wxasync|wxpython|PyOpenGL==3.1.7|PyOpenGL-accelerate==3.1.7" requirements.txt> edited_requirements.txt -pip3 install --break-system-packages -r edited_requirements.txt +pip3 install --break-system-packages .[component] popd # Create startup script From a61aee5cd531a49325411d4ad7346ce61bca47a8 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 28 Aug 2025 17:51:24 -0400 Subject: [PATCH 26/33] BUG: Show status messages in GUI (added missing append to queue) --- src/controller/mct_controller.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 60fca77..fd34052 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -19,6 +19,7 @@ MCTResponseSeries, \ MixerFrame, \ SeverityLabel, \ + StatusMessage, \ StatusMessageSource, \ TimestampGetRequest, \ TimestampGetResponse, \ @@ -701,9 +702,9 @@ def start_up( self._startup_state = MCTController.StartupState.CONNECTING self._status = MCTController.Status.STARTING - self.recording_start(save_path="/home/adminpi5", - record_pose_solver=True, - record_detector=True) + # self.recording_start(save_path="/home/adminpi5", + # record_pose_solver=True, + # record_detector=True) def shut_down(self) -> None: if self._status != MCTController.Status.RUNNING: @@ -726,6 +727,13 @@ def update( connections = list(self._connections.values()) for connection in connections: connection.update() + status_messages: list[StatusMessage] = connection.dequeue_status_messages() + for status_message in status_messages: + self._status_message_source.enqueue_status_message( + severity=status_message.severity, + message=status_message.message, + source_label=status_message.source_label, + timestamp_utc_iso8601=status_message.timestamp_utc_iso8601) if self._status == MCTController.Status.STARTING and \ self._startup_state == MCTController.StartupState.CONNECTING: From 59aa33afa1fed14808fb7e646987de7f4eaaee4d Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Wed, 3 Sep 2025 16:30:24 -0400 Subject: [PATCH 27/33] BUG: Revert change that prevented Detector from communicating externally --- src/main_detector.py | 1 + src/main_mixer.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/main_detector.py b/src/main_detector.py index e912c35..498e7bc 
100644 --- a/src/main_detector.py +++ b/src/main_detector.py @@ -8,6 +8,7 @@ def main(): app, reload=False, port=8001, + host="0.0.0.0", log_level=logging.INFO) diff --git a/src/main_mixer.py b/src/main_mixer.py index de55900..56aa823 100644 --- a/src/main_mixer.py +++ b/src/main_mixer.py @@ -8,6 +8,7 @@ def main(): app, reload=False, port=8000, + host="0.0.0.0", log_level=logging.INFO) From 2007013235e95464938429f149dac280259d36d6 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 4 Sep 2025 13:00:31 -0400 Subject: [PATCH 28/33] BUG: Fix issues during initial Detector connection --- src/common/api.py | 49 +++----- src/common/mct_component.py | 13 +- src/common/serialization.py | 30 ++--- src/controller/connection.py | 56 +++++++-- src/controller/mct_controller.py | 4 +- src/detector/api.py | 205 ++++++++++--------------------- src/detector/detector.py | 14 +-- src/mixer/api.py | 169 +++++++++---------------- src/mixer/mixer.py | 4 +- 9 files changed, 219 insertions(+), 325 deletions(-) diff --git a/src/common/api.py b/src/common/api.py index b7c7f20..720270a 100644 --- a/src/common/api.py +++ b/src/common/api.py @@ -2,7 +2,6 @@ from .status import StatusMessage import abc from pydantic import BaseModel, Field, SerializeAsAny -from typing import Final, Literal class MCTRequest(BaseModel, MCTDeserializable, abc.ABC): @@ -23,97 +22,81 @@ class MCTResponseSeries(BaseModel): class EmptyResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "empty" - @staticmethod def type_identifier() -> str: - return EmptyResponse._TYPE_IDENTIFIER + return "empty" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ErrorResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "error" - @staticmethod def type_identifier() -> str: - return ErrorResponse._TYPE_IDENTIFIER + return "error" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) message: str = Field() class DequeueStatusMessagesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - @staticmethod def type_identifier() -> str: - return DequeueStatusMessagesRequest._TYPE_IDENTIFIER + return "dequeue_status_messages" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class DequeueStatusMessagesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "dequeue_status_messages" - @staticmethod def type_identifier() -> str: - return DequeueStatusMessagesResponse._TYPE_IDENTIFIER + return "dequeue_status_messages" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) status_messages: list[StatusMessage] = Field() class TimeSyncStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_start_request" - @staticmethod def type_identifier() -> str: - return TimeSyncStartRequest._TYPE_IDENTIFIER + return "time_sync_start_request" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class TimeSyncStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "time_sync_stop_request" - @staticmethod def type_identifier() -> str: - return TimeSyncStopRequest._TYPE_IDENTIFIER + return 
"time_sync_stop_request" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class TimestampGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_request" - @staticmethod def type_identifier() -> str: - return TimestampGetRequest._TYPE_IDENTIFIER + return "timestamp_get_request" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) requester_timestamp_utc_iso8601: str = Field() class TimestampGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "timestamp_get_response" - @staticmethod def type_identifier() -> str: - return TimestampGetResponse._TYPE_IDENTIFIER + return "timestamp_get_response" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) requester_timestamp_utc_iso8601: str = Field() responder_timestamp_utc_iso8601: str = Field() diff --git a/src/common/mct_component.py b/src/common/mct_component.py index 7ab5513..5e804ed 100644 --- a/src/common/mct_component.py +++ b/src/common/mct_component.py @@ -108,7 +108,7 @@ def add_status_subscriber( def parse_dynamic_series_list( self, parsable_series_dict: dict, - supported_types: list[type[SerializableSingle]] + supported_types: dict[str, type[SerializableSingle]] ) -> list[SerializableSingle]: try: return MCTDeserializable.deserialize_series_list( @@ -168,8 +168,13 @@ def get_role_label(): def get_status_message_source(self): return self._status_message_source + def supported_request_types(self) -> dict[str, list[MCTRequest]]: + return { + request_type.type_identifier(): request_type + for request_type in self.supported_request_methods().keys()} + @abc.abstractmethod - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: """ All subclasses are expected to implement this method, even if it is simply a call to super(). 
:return: @@ -213,7 +218,7 @@ async def websocket_handler(self, websocket: WebSocket) -> None: try: request_series_list: list[MCTRequest] = self.parse_dynamic_series_list( parsable_series_dict=request_series_dict, - supported_types=list(self.supported_request_types().keys())) + supported_types=self.supported_request_types()) except MCTSerializationError as e: logger.exception(str(e)) await websocket.send_json(MCTResponseSeries().model_dump()) @@ -231,7 +236,7 @@ def websocket_handle_requests( client_identifier: str, request_series: MCTRequestSeries ) -> MCTResponseSeries: - request_map: dict[type[MCTRequest], Callable] = self.supported_request_types() + request_map: dict[type[MCTRequest], Callable] = self.supported_request_methods() response_series: list[MCTResponse] = list() for request in request_series.series: # noinspection PyBroadException diff --git a/src/common/serialization.py b/src/common/serialization.py index 362299b..89ae192 100644 --- a/src/common/serialization.py +++ b/src/common/serialization.py @@ -133,7 +133,7 @@ def to_simple(self) -> KeyValueSimpleInt: KeyValueMetaInt] -DeserializableT = TypeVar('DeserializableT', bound='MCTParsable') +DeserializableT = TypeVar('DeserializableT', bound='MCTDeserializable') class MCTSerializationError(MCTError): @@ -154,7 +154,7 @@ def type_identifier() -> str: @staticmethod def deserialize_series_list( series_dict: dict, - supported_types: list[type[DeserializableT]] + supported_types: dict[str, type[DeserializableT]] ) -> list[DeserializableT]: if "series" not in series_dict or not isinstance(series_dict["series"], list): message: str = "parsable_series_dict did not contain field series. Input is improperly formatted." @@ -174,23 +174,25 @@ def deserialize_series_list( @staticmethod def deserialize_single( single_dict: dict, - supported_types: list[type[DeserializableT]] + supported_types: dict[str, type[DeserializableT]] ) -> DeserializableT: if "parsable_type" not in single_dict or not isinstance(single_dict["parsable_type"], str): message: str = "parsable_dict did not contain parsable_type. Input is improperly formatted." raise MCTSerializationError(message) from None - for supported_type in supported_types: - if single_dict["parsable_type"] == supported_type.parsable_type_identifier(): - request: DeserializableT - try: - request = supported_type(**single_dict) - except ValidationError as e: - raise MCTSerializationError(f"A request of type {supported_type} was ill-formed: {str(e)}") from None - return request - - message: str = "parsable_type did not match any expected value. Input is improperly formatted." - raise MCTSerializationError(message) + parsable_type: str = single_dict["parsable_type"] + if parsable_type not in supported_types: + message: str = "parsable_type did not match any expected value. Input is improperly formatted." 
+ raise MCTSerializationError(message) + + supported_type: type[DeserializableT] = supported_types[parsable_type] + deserializable: DeserializableT + try: + deserializable = supported_type(**single_dict) + except ValidationError as e: + raise MCTSerializationError( + f"A deserializable of type {supported_type} was ill-formed: {str(e)}") from None + return deserializable class IOUtils: diff --git a/src/controller/connection.py b/src/controller/connection.py index 91bc189..e5175e8 100644 --- a/src/controller/connection.py +++ b/src/controller/connection.py @@ -38,6 +38,14 @@ AnnotatorParametersGetResponse, \ AnnotatorParametersSetRequest from src.mixer import \ + ExtrinsicCalibrationCalculateResponse, \ + ExtrinsicCalibrationImageAddResponse, \ + ExtrinsicCalibrationImageGetResponse, \ + ExtrinsicCalibrationImageMetadataListResponse, \ + ExtrinsicCalibrationResultGetResponse, \ + ExtrinsicCalibrationResultGetActiveResponse, \ + ExtrinsicCalibrationResultMetadataListResponse, \ + PoseSolverAddTargetResponse, \ PoseSolverGetPosesResponse, \ PoseSolverSetTargetsRequest, \ MixerStartRequest, \ @@ -398,12 +406,16 @@ def start_up(self) -> None: self._next_attempt_timestamp_utc = datetime.datetime.now(tz=datetime.timezone.utc) @abc.abstractmethod - def supported_response_types(self) -> list[type[MCTResponse]]: - return [ + def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_list: list[MCTResponse] = [ DequeueStatusMessagesResponse, EmptyResponse, ErrorResponse, TimestampGetResponse] + type_dict: dict[str, type[MCTResponse]] = { + type_single.type_identifier(): type_single + for type_single in type_list} + return type_dict def _try_connect(self) -> ConnectionResult: uri: str = f"ws://{self._component_address.ip_address}:{self._component_address.port}/websocket" @@ -643,8 +655,15 @@ def handle_initialization_response_series( message=f"The initialization response was not of the expected type EmptyResponse.") return Connection.InitializationResult.SUCCESS - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ + def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_dict: dict[str, type[MCTResponse]] = super().supported_response_types() + type_list: list[MCTResponse] = [ + AnnotatorParametersGetResponse, + CameraImageGetResponse, + CameraParametersGetResponse, + CameraParametersSetResponse, + CameraResolutionGetResponse, + DetectorFrameGetResponse, IntrinsicCalibrationCalculateResponse, IntrinsicCalibrationImageAddResponse, IntrinsicCalibrationImageGetResponse, @@ -652,13 +671,11 @@ def supported_response_types(self) -> list[type[MCTResponse]]: IntrinsicCalibrationResolutionListResponse, IntrinsicCalibrationResultGetResponse, IntrinsicCalibrationResultGetActiveResponse, - IntrinsicCalibrationResultMetadataListResponse, - CameraImageGetResponse, - CameraParametersGetResponse, - CameraParametersSetResponse, - CameraResolutionGetResponse, - DetectorFrameGetResponse, - AnnotatorParametersGetResponse] + IntrinsicCalibrationResultMetadataListResponse] + type_dict.update({ + type_single.type_identifier(): type_single + for type_single in type_list}) + return type_dict class PoseSolverConnection(Connection): @@ -730,6 +747,19 @@ def handle_initialization_response_series( message=f"The initialization response was not of the expected type EmptyResponse.") return Connection.InitializationResult.SUCCESS - def supported_response_types(self) -> list[type[MCTResponse]]: - return super().supported_response_types() + [ + 
def supported_response_types(self) -> dict[str, type[MCTResponse]]: + type_dict: dict[str, type[MCTResponse]] = super().supported_response_types() + type_list: list[MCTResponse] = [ + ExtrinsicCalibrationCalculateResponse, + ExtrinsicCalibrationImageAddResponse, + ExtrinsicCalibrationImageGetResponse, + ExtrinsicCalibrationImageMetadataListResponse, + ExtrinsicCalibrationResultGetResponse, + ExtrinsicCalibrationResultGetActiveResponse, + ExtrinsicCalibrationResultMetadataListResponse, + PoseSolverAddTargetResponse, PoseSolverGetPosesResponse] + type_dict.update({ + type_single.type_identifier(): type_single + for type_single in type_list}) + return type_dict diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index fd34052..2767907 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -717,8 +717,8 @@ def shut_down(self) -> None: self.recording_stop() - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return super().supported_request_types() + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return super().supported_request_methods() # Right now this function doesn't update on its own - must be called externally and regularly def update( diff --git a/src/detector/api.py b/src/detector/api.py index e45e6b1..d32d55a 100644 --- a/src/detector/api.py +++ b/src/detector/api.py @@ -9,64 +9,53 @@ MCTRequest, \ MCTResponse from pydantic import Field, SerializeAsAny -from typing import Final class AnnotatorParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_get" - @staticmethod def type_identifier() -> str: - return AnnotatorParametersGetRequest._TYPE_IDENTIFIER + return "detector_annotator_parameters_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class AnnotatorParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_get" - @staticmethod def type_identifier() -> str: - return AnnotatorParametersGetResponse._TYPE_IDENTIFIER + return "detector_annotator_parameters_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() class AnnotatorParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_annotator_parameters_set" - @staticmethod def type_identifier() -> str: - return AnnotatorParametersSetRequest._TYPE_IDENTIFIER + return "detector_annotator_parameters_set" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() class CameraImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" - @staticmethod def type_identifier() -> str: - return CameraImageGetRequest._TYPE_IDENTIFIER + return "detector_camera_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) format: ImageFormat = Field() requested_resolution: ImageResolution | None = Field(default=None) class CameraImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_image_get" - @staticmethod def type_identifier() -> str: - return CameraImageGetResponse._TYPE_IDENTIFIER + return "detector_camera_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + 
parsable_type: str = Field(default=type_identifier()) format: ImageFormat = Field() image_base64: str = Field() @@ -74,231 +63,191 @@ def type_identifier() -> str: class CameraParametersGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" - @staticmethod def type_identifier() -> str: - return CameraParametersGetRequest._TYPE_IDENTIFIER + return "detector_camera_parameters_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class CameraParametersGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_get" - @staticmethod def type_identifier() -> str: - return CameraParametersGetResponse._TYPE_IDENTIFIER + return "detector_camera_parameters_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) parameters: list[SerializeAsAny[KeyValueMetaAny]] = Field() class CameraParametersSetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" - @staticmethod def type_identifier() -> str: - return CameraParametersSetRequest._TYPE_IDENTIFIER + return "detector_camera_parameters_set" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) parameters: list[SerializeAsAny[KeyValueSimpleAny]] = Field() class CameraParametersSetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_parameters_set" - @staticmethod def type_identifier() -> str: - return CameraParametersSetResponse._TYPE_IDENTIFIER + return "detector_camera_parameters_set" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) resolution: ImageResolution = Field() # Sometimes parameter changes may result in changes of resolution class CameraResolutionGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" - @staticmethod def type_identifier() -> str: - return CameraResolutionGetRequest._TYPE_IDENTIFIER + return "detector_camera_resolution_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class CameraResolutionGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_camera_resolution_get" - @staticmethod def type_identifier() -> str: - return CameraResolutionGetResponse._TYPE_IDENTIFIER + return "detector_camera_resolution_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) resolution: ImageResolution = Field() class DetectorFrameGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" - @staticmethod def type_identifier() -> str: - return DetectorFrameGetRequest._TYPE_IDENTIFIER + return "detector_frame_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) include_detected: bool = Field(default=True) include_rejected: bool = Field(default=True) class DetectorFrameGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_frame_get" - @staticmethod def type_identifier() -> str: - return DetectorFrameGetResponse._TYPE_IDENTIFIER + return "detector_frame_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) frame: DetectorFrame = Field() class DetectorStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_start" - @staticmethod def type_identifier() -> str: - 
return DetectorStartRequest._TYPE_IDENTIFIER + return "detector_start" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class DetectorStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_stop" - @staticmethod def type_identifier() -> str: - return DetectorStopRequest._TYPE_IDENTIFIER + return "detector_stop" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class IntrinsicCalibrationCalculateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_calculate" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationCalculateRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_calculate" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_resolution: ImageResolution = Field() class IntrinsicCalibrationCalculateResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_calculate" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationCalculateResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_calculate" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() intrinsic_calibration: IntrinsicCalibration = Field() class IntrinsicCalibrationDeleteStagedRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_delete_staged" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationDeleteStagedRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_delete_staged" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class IntrinsicCalibrationImageAddRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_add" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageAddRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_add" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class IntrinsicCalibrationImageAddResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_add" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageAddResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_add" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() class IntrinsicCalibrationImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageGetRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() class IntrinsicCalibrationImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageGetResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_base64: str 
= Field() class IntrinsicCalibrationImageMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageMetadataListRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_resolution: ImageResolution = Field() class IntrinsicCalibrationImageMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageMetadataListResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) metadata_list: list[IntrinsicCalibrator.ImageMetadata] = Field(default_factory=list) class IntrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_image_metadata_update" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_image_metadata_update" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() image_state: IntrinsicCalibrator.ImageState = Field() @@ -306,105 +255,87 @@ def type_identifier() -> str: class IntrinsicCalibrationResolutionListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_detector_resolutions_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResolutionListRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_detector_resolutions_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class IntrinsicCalibrationResolutionListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_detector_resolutions_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResolutionListResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_detector_resolutions_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) resolutions: list[ImageResolution] = Field() class IntrinsicCalibrationResultGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultGetRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() class IntrinsicCalibrationResultGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultGetResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) intrinsic_calibration: IntrinsicCalibration = Field() class IntrinsicCalibrationResultGetActiveRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = 
"detector_intrinsic_calibration_result_active_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultGetActiveRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_active_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class IntrinsicCalibrationResultGetActiveResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_active_get" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultGetActiveResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_active_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) intrinsic_calibration: IntrinsicCalibration = Field() class IntrinsicCalibrationResultMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultMetadataListRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_resolution: ImageResolution = Field() class IntrinsicCalibrationResultMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_list" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultMetadataListResponse._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) metadata_list: list[IntrinsicCalibrator.ResultMetadata] = Field(default_factory=list) class IntrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "detector_intrinsic_calibration_result_metadata_update" - @staticmethod def type_identifier() -> str: - return IntrinsicCalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER + return "detector_intrinsic_calibration_result_metadata_update" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() result_state: IntrinsicCalibrator.ResultState = Field() diff --git a/src/detector/detector.py b/src/detector/detector.py index acfd9bd..9fd4db0 100644 --- a/src/detector/detector.py +++ b/src/detector/detector.py @@ -420,8 +420,8 @@ def detector_stop( def get_role_label(): return _ROLE_LABEL - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_methods() return_value.update({ DetectorFrameGetRequest: self.detector_frame_get, DetectorStartRequest: self.detector_start, @@ -455,7 +455,7 @@ async def update(self): except MCTCameraRuntimeError as e: self.add_status_message( severity=SeverityLabel.ERROR, - message=e.message) + message=f"Exception occurred in Camera update: {e.message}") if self._annotator.get_status() == Annotator.Status.RUNNING and \ self._camera.get_changed_timestamp() > self._annotator.get_changed_timestamp(): try: @@ -463,7 +463,7 @@ async def 
update(self): except MCTAnnotatorRuntimeError as e: self.add_status_message( severity=SeverityLabel.ERROR, - message=e.message) - self._frame_count += 1 - if self._frame_count % 1000 == 0: - print(f"Update count: {self._frame_count}") + message=f"Exception occurred in Annotator update: {e.message}") + # self._frame_count += 1 + # if self._frame_count % 1000 == 0: + # print(f"Update count: {self._frame_count}") diff --git a/src/mixer/api.py b/src/mixer/api.py index 9269465..6944078 100644 --- a/src/mixer/api.py +++ b/src/mixer/api.py @@ -9,50 +9,41 @@ Pose, \ Target from pydantic import Field -from typing import Final, Literal class ExtrinsicCalibrationCalculateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_calculate" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationCalculateRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_calculate" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ExtrinsicCalibrationCalculateResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_calculate" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationCalculateResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_calculate" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() extrinsic_calibration: ExtrinsicCalibration = Field() class ExtrinsicCalibrationDeleteStagedRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_delete_staged" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationDeleteStagedRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_delete_staged" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ExtrinsicCalibrationImageAddRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_add" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageAddRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_add" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_base64: str = Field() detector_label: str = Field() @@ -60,71 +51,59 @@ def type_identifier() -> str: class ExtrinsicCalibrationImageAddResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_add" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageAddResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_add" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() class ExtrinsicCalibrationImageGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageGetRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() class ExtrinsicCalibrationImageGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageGetResponse._TYPE_IDENTIFIER + return 
"mixer_extrinsic_calibration_image_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_base64: str = Field() class ExtrinsicCalibrationImageMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_list" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageMetadataListRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ExtrinsicCalibrationImageMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_list" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageMetadataListResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) metadata_list: list[ExtrinsicCalibrator.ImageMetadata] = Field(default_factory=list) class ExtrinsicCalibrationImageMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_image_metadata_update" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationImageMetadataUpdateRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_image_metadata_update" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) image_identifier: str = Field() image_state: ExtrinsicCalibrator.ImageState = Field() @@ -132,81 +111,67 @@ def type_identifier() -> str: class ExtrinsicCalibrationResultGetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultGetRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() class ExtrinsicCalibrationResultGetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultGetResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) extrinsic_calibration: ExtrinsicCalibration = Field() class ExtrinsicCalibrationResultGetActiveRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_active_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultGetActiveRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_active_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ExtrinsicCalibrationResultGetActiveResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_active_get" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultGetActiveResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_active_get" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) extrinsic_calibration: ExtrinsicCalibration = Field() class 
ExtrinsicCalibrationResultMetadataListRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_list" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultMetadataListRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class ExtrinsicCalibrationResultMetadataListResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_list" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultMetadataListResponse._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_metadata_list" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) metadata_list: list[ExtrinsicCalibrator.ResultMetadata] = Field(default_factory=list) class ExtrinsicCalibrationResultMetadataUpdateRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_extrinsic_calibration_result_metadata_update" - @staticmethod def type_identifier() -> str: - return ExtrinsicCalibrationResultMetadataUpdateRequest._TYPE_IDENTIFIER + return "mixer_extrinsic_calibration_result_metadata_update" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) result_identifier: str = Field() result_state: ExtrinsicCalibrator.ResultState = Field() @@ -214,141 +179,119 @@ def type_identifier() -> str: class PoseSolverAddDetectorFrameRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_marker_corners" - @staticmethod def type_identifier() -> str: - return PoseSolverAddDetectorFrameRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_add_marker_corners" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) detector_label: str = Field() detector_frame: DetectorFrame = Field() class PoseSolverAddTargetRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_target" - @staticmethod def type_identifier() -> str: - return PoseSolverAddTargetRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_add_target" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) target: Target = Field() class PoseSolverAddTargetResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_add_target" - @staticmethod def type_identifier() -> str: - return PoseSolverAddTargetResponse._TYPE_IDENTIFIER + return "mixer_pose_solver_add_target" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) target_id: str = Field() class PoseSolverGetPosesRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_get_poses" - @staticmethod def type_identifier() -> str: - return PoseSolverGetPosesRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_get_poses" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class PoseSolverGetPosesResponse(MCTResponse): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_get_poses" - @staticmethod def type_identifier() -> str: - return PoseSolverGetPosesResponse._TYPE_IDENTIFIER + return 
"mixer_pose_solver_get_poses" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) detector_poses: list[Pose] target_poses: list[Pose] class PoseSolverSetExtrinsicRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_extrinsic_parameters" - @staticmethod def type_identifier() -> str: - return PoseSolverSetExtrinsicRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_set_extrinsic_parameters" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) detector_label: str = Field() transform_to_reference: Matrix4x4 = Field() class PoseSolverSetReferenceRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_reference_marker" - @staticmethod def type_identifier() -> str: - return PoseSolverSetReferenceRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_set_reference_marker" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) marker_id: int = Field() marker_diameter: float = Field() class PoseSolverSetTargetsRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_pose_solver_set_targets" - @staticmethod def type_identifier() -> str: - return PoseSolverSetTargetsRequest._TYPE_IDENTIFIER + return "mixer_pose_solver_set_targets" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) targets: list[Target] = Field() class MixerStartRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_start" - @staticmethod def type_identifier() -> str: - return MixerStartRequest._TYPE_IDENTIFIER + return "mixer_start" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class MixerStopRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_stop" - @staticmethod def type_identifier() -> str: - return MixerStopRequest._TYPE_IDENTIFIER + return "mixer_stop" # noinspection PyTypeHints - parsable_type: Literal[_TYPE_IDENTIFIER] = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) class MixerUpdateIntrinsicParametersRequest(MCTRequest): - _TYPE_IDENTIFIER: Final[str] = "mixer_update_intrinsic_parameters" - @staticmethod def type_identifier() -> str: - return MixerUpdateIntrinsicParametersRequest._TYPE_IDENTIFIER + return "mixer_update_intrinsic_parameters" - parsable_type: str = Field(default=_TYPE_IDENTIFIER) + parsable_type: str = Field(default=type_identifier()) detector_label: str = Field() intrinsic_parameters: IntrinsicParameters = Field() diff --git a/src/mixer/mixer.py b/src/mixer/mixer.py index bc83b58..e6ae863 100644 --- a/src/mixer/mixer.py +++ b/src/mixer/mixer.py @@ -338,8 +338,8 @@ def pose_solver_set_targets(self, **kwargs) -> EmptyResponse | ErrorResponse: return ErrorResponse(message=e.message) return EmptyResponse() - def supported_request_types(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: - return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = super().supported_request_types() + def supported_request_methods(self) -> dict[type[MCTRequest], Callable[[dict], MCTResponse]]: + return_value: dict[type[MCTRequest], Callable[[dict], MCTResponse]] = 
super().supported_request_methods() return_value.update({ ExtrinsicCalibrationCalculateRequest: self.extrinsic_calibrator_calculate, ExtrinsicCalibrationDeleteStagedRequest: self.extrinsic_calibrator_delete_staged, From 7c0570ea441439111e724d2ac60f2599c84d068c Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 4 Sep 2025 13:39:17 -0400 Subject: [PATCH 29/33] BUG: Fix rendering of detected markers in GUI --- src/gui/panels/detector_panel.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 8594714..19599a8 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -406,14 +406,14 @@ def _marker_snapshot_list_to_opencv_points( if len(marker_snapshot_list) <= 0: return numpy.asarray([], dtype=numpy.int32) return_value: list[list[list[(float, float)]]] = list() - current_base_label: str = marker_snapshot_list[0].base_feature_label() - current_shape_points: list[list[(float, float)]] = [[ - marker_snapshot_list[0].x_px * scale, - marker_snapshot_list[0].y_px * scale]] + current_base_label: str | None = None + current_shape_points: list[list[(float, float)]] | None = None for marker_snapshot in marker_snapshot_list: annotation_base_label = marker_snapshot.base_feature_label() if annotation_base_label != current_base_label: - return_value.append(current_shape_points) + if current_shape_points is not None: + return_value.append(current_shape_points) + current_shape_points = list() current_base_label = annotation_base_label current_shape_points.append([ marker_snapshot.x_px * scale, From 3a81b1e7fc2f055829fdccd1533a6bf3d324c24f Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 21 Oct 2025 17:38:33 -0400 Subject: [PATCH 30/33] ENH: Add Detector image generation files --- .gitignore | 1 + pi-gen-detector/config | 8 +++++ pi-gen-detector/create_image.sh | 36 +++++++++++++++++++ pi-gen-detector/stage6/00_custom/00-packages | 9 +++++ pi-gen-detector/stage6/00_custom/01-run.sh | 8 +++++ .../stage6/00_custom/02-run-chroot.sh | 17 +++++++++ .../stage6/00_custom/files/mcstrack_startup | 4 +++ pi-gen-detector/stage6/EXPORT_IMAGE | 1 + pi-gen-detector/stage6/prerun.sh | 4 +++ 9 files changed, 88 insertions(+) create mode 100644 pi-gen-detector/config create mode 100644 pi-gen-detector/create_image.sh create mode 100644 pi-gen-detector/stage6/00_custom/00-packages create mode 100644 pi-gen-detector/stage6/00_custom/01-run.sh create mode 100644 pi-gen-detector/stage6/00_custom/02-run-chroot.sh create mode 100644 pi-gen-detector/stage6/00_custom/files/mcstrack_startup create mode 100644 pi-gen-detector/stage6/EXPORT_IMAGE create mode 100644 pi-gen-detector/stage6/prerun.sh diff --git a/.gitignore b/.gitignore index aa30324..da9b86d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,6 @@ .venv .vscode mcstrack.egg-info +pi-gen-detector/pi-gen venv *.pyc diff --git a/pi-gen-detector/config b/pi-gen-detector/config new file mode 100644 index 0000000..64a39f7 --- /dev/null +++ b/pi-gen-detector/config @@ -0,0 +1,8 @@ +export IMG_NAME=mcstrack-detector +export LOCALE_DEFAULT=en_US.UTF-8 +export TIMEZONE_DEFAULT=America/Toronto +export DISABLE_FIRST_BOOT_USER_RENAME=1 +export FIRST_USER_NAME=admin +export FIRST_USER_PASS="admin" +export ENABLE_SSH=0 +export STAGE_LIST="stage0 stage1 stage2 stage6" diff --git a/pi-gen-detector/create_image.sh b/pi-gen-detector/create_image.sh new file mode 100644 index 0000000..215eb9f --- /dev/null +++ b/pi-gen-detector/create_image.sh @@ -0,0 
+1,36 @@ +#!/bin/bash -e + + +echo "Checking dependencies..." +sudo apt install -y coreutils quilt parted qemu-user-static debootstrap zerofree zip dosfstools e2fsprogs libarchive-tools libcap2-bin grep rsync xz-utils file git curl bc gpg pigz xxd arch-test bmap-tools kmod + + + +echo "Cloning pi-gen repository..." +if [ -d "pi-gen" ]; then + rm -r pi-gen +fi +git clone --branch arm64 https://github.com/RPI-Distro/pi-gen.git + + +echo "Setting up for image creation..." +cp config pi-gen +cd pi-gen +touch ./stage3/SKIP +touch ./stage4/SKIP +touch ./stage5/SKIP +touch ./stage4/SKIP_IMAGES +touch ./stage5/SKIP_IMAGES +cp -r ../stage6 stage6 +chmod +x build.sh +chmod +x stage6/prerun.sh +chmod +x stage6/00_custom/01-run.sh +chmod +x stage6/00_custom/01-run-chroot.sh + + +echo "Building image..." +./build.sh +exitCode=$? +if [ $exitCode -ne 0 ]; then + echo "Exited with code ${exitCode}" ; exit -1 +fi diff --git a/pi-gen-detector/stage6/00_custom/00-packages b/pi-gen-detector/stage6/00_custom/00-packages new file mode 100644 index 0000000..2194977 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/00-packages @@ -0,0 +1,9 @@ +build-essential +libglib2.0-dev +libglu1-mesa-dev +libgl1-mesa-dev +libgtk-3-dev +python3-picamera2 +python3-pip +python3-venv +ufw diff --git a/pi-gen-detector/stage6/00_custom/01-run.sh b/pi-gen-detector/stage6/00_custom/01-run.sh new file mode 100644 index 0000000..e836df3 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/01-run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +install -d "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp -r "${STAGE_DIR}/../../../data" "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp -r "${STAGE_DIR}/../../../src" "${ROOTFS_DIR}/home/admin/MCSTrack/" +cp "${STAGE_DIR}/../../../pyproject.toml" "${ROOTFS_DIR}/home/admin/MCSTrack/" +install -d "${ROOTFS_DIR}/usr/local/bin/" +install "${STAGE_DIR}/00_custom/files/mcstrack_startup" "${ROOTFS_DIR}/usr/local/bin/" +chmod +x "${ROOTFS_DIR}/usr/local/bin/mcstrack_startup" diff --git a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh new file mode 100644 index 0000000..042a0e3 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh @@ -0,0 +1,17 @@ +#!/bin/bash +sudo apt update +sudo apt upgrade -y + +# Firewall setup +sudo ufw enable +sudo ufw allow 8001 + +# Python setup +cd /home/admin/MCSTrack +python3 -m venv --system-site-packages .venv +source .venv/bin/activate +pip install --break-system-packages .[component] +deactivate + +# Run startup script on boot +sudo echo "@reboot root /usr/local/bin/mcstrack_startup >> mcstrack_log.log" > /etc/cron.d/startup diff --git a/pi-gen-detector/stage6/00_custom/files/mcstrack_startup b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup new file mode 100644 index 0000000..61aaee4 --- /dev/null +++ b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup @@ -0,0 +1,4 @@ +#!/bin/bash +cd /home/admin/MCSTrack +source .venv/bin/activate +python -m src.main_detector diff --git a/pi-gen-detector/stage6/EXPORT_IMAGE b/pi-gen-detector/stage6/EXPORT_IMAGE new file mode 100644 index 0000000..9008606 --- /dev/null +++ b/pi-gen-detector/stage6/EXPORT_IMAGE @@ -0,0 +1 @@ +IMG_SUFFIX="-mcs" diff --git a/pi-gen-detector/stage6/prerun.sh b/pi-gen-detector/stage6/prerun.sh new file mode 100644 index 0000000..5d99308 --- /dev/null +++ b/pi-gen-detector/stage6/prerun.sh @@ -0,0 +1,4 @@ +#!/bin/bash -e +if [ ! 
-d "${ROOTFS_DIR}" ]; then + copy_previous +fi From fc6fa19327fc1dab933d78693efa03ff00140aa8 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Tue, 16 Dec 2025 13:09:27 -0500 Subject: [PATCH 31/33] BUG: Fix package dependencies and configuration for Detector --- .../opencv_aruco.json} | 0 data/configuration/detector/rpicam_aruco.json | 19 ++++++++++++++++++ .../{mixer_config.json => mixer/aruco.json} | 0 pi-gen-detector/create_image.sh | 2 +- pi-gen-detector/stage6/00_custom/01-run.sh | 0 .../stage6/00_custom/02-run-chroot.sh | 3 +++ pi-gen-detector/stage6/prerun.sh | 0 pyproject.toml | 20 +++++++++---------- src/detector/app.py | 8 ++++++-- src/mixer/app.py | 6 +++++- 10 files changed, 44 insertions(+), 14 deletions(-) rename data/configuration/{detector_config.json => detector/opencv_aruco.json} (100%) create mode 100644 data/configuration/detector/rpicam_aruco.json rename data/configuration/{mixer_config.json => mixer/aruco.json} (100%) mode change 100644 => 100755 pi-gen-detector/create_image.sh mode change 100644 => 100755 pi-gen-detector/stage6/00_custom/01-run.sh mode change 100644 => 100755 pi-gen-detector/stage6/00_custom/02-run-chroot.sh mode change 100644 => 100755 pi-gen-detector/stage6/prerun.sh diff --git a/data/configuration/detector_config.json b/data/configuration/detector/opencv_aruco.json similarity index 100% rename from data/configuration/detector_config.json rename to data/configuration/detector/opencv_aruco.json diff --git a/data/configuration/detector/rpicam_aruco.json b/data/configuration/detector/rpicam_aruco.json new file mode 100644 index 0000000..5bf4c83 --- /dev/null +++ b/data/configuration/detector/rpicam_aruco.json @@ -0,0 +1,19 @@ +{ + "detector_label": "detector_01", + "intrinsic_calibrator": { + "implementation": "charuco_opencv", + "configuration": { + "data_path": "./calibration/intrinsic/" + } + }, + "camera": { + "implementation": "opencv_capture_device", + "configuration": { + "capture_device": 0 + } + }, + "annotator": { + "implementation": "aruco_opencv", + "configuration": {} + } +} diff --git a/data/configuration/mixer_config.json b/data/configuration/mixer/aruco.json similarity index 100% rename from data/configuration/mixer_config.json rename to data/configuration/mixer/aruco.json diff --git a/pi-gen-detector/create_image.sh b/pi-gen-detector/create_image.sh old mode 100644 new mode 100755 index 215eb9f..dadc3e9 --- a/pi-gen-detector/create_image.sh +++ b/pi-gen-detector/create_image.sh @@ -25,7 +25,7 @@ cp -r ../stage6 stage6 chmod +x build.sh chmod +x stage6/prerun.sh chmod +x stage6/00_custom/01-run.sh -chmod +x stage6/00_custom/01-run-chroot.sh +chmod +x stage6/00_custom/02-run-chroot.sh echo "Building image..." 
diff --git a/pi-gen-detector/stage6/00_custom/01-run.sh b/pi-gen-detector/stage6/00_custom/01-run.sh old mode 100644 new mode 100755 diff --git a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh old mode 100644 new mode 100755 index 042a0e3..bd44af6 --- a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh +++ b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh @@ -2,6 +2,9 @@ sudo apt update sudo apt upgrade -y +# Environment variables +export MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH="/home/admin/MCSTrack/data/configuration/detector/rpicam_aruco.json" + # Firewall setup sudo ufw enable sudo ufw allow 8001 diff --git a/pi-gen-detector/stage6/prerun.sh b/pi-gen-detector/stage6/prerun.sh old mode 100644 new mode 100755 diff --git a/pyproject.toml b/pyproject.toml index dcb91da..4b7271e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,23 +19,23 @@ classifiers = [ "License :: MIT" ] dependencies = [ - "fastapi~=0.115", - "hjson~=3.1", - "numpy~=2.3", - "numpy-stl~=3.2", - "opencv-contrib-python~=4.11", - "pydantic~=2.11", - "scipy~=1.15", - "websockets~=15.0" + "fastapi~=0.115.13", + "hjson~=3.1.0", + "numpy~=2.3.2", + "numpy-stl~=3.2.0", + "opencv-contrib-python~=4.11.0.86", + "pydantic~=2.11.7", + "scipy~=1.15.3", + "websockets~=15.0.1" ] [project.optional-dependencies] gui = [ "PyOpenGL~=3.1.7", - "wxpython~=4.2" + "wxpython~=4.2.3" ] component = [ - "uvicorn[standard]~=0.35" + "uvicorn[standard]~=0.35.0" ] [tool.setuptools] diff --git a/src/detector/app.py b/src/detector/app.py index b6a9d06..4e68b8f 100644 --- a/src/detector/app.py +++ b/src/detector/app.py @@ -29,14 +29,18 @@ import hjson import logging import os +from typing import Final logger = logging.getLogger(__name__) +CONFIGURATION_FILEPATH_ENV_VAR: Final[str] = "MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH" + def create_app() -> FastAPI: - detector_configuration_filepath: str = \ - os.path.join(os.path.dirname(__file__), "..", "..", "data", "configuration", "detector_config.json") + detector_configuration_filepath: str = os.path.join( + os.path.dirname(__file__), "..", "..", "data", "configuration", "detector", "opencv_aruco.json") + detector_configuration_filepath = os.getenv(CONFIGURATION_FILEPATH_ENV_VAR, detector_configuration_filepath) detector_configuration: Detector.Configuration with open(detector_configuration_filepath, 'r') as infile: detector_configuration_file_contents: str = infile.read() diff --git a/src/mixer/app.py b/src/mixer/app.py index 01e45be..42d804d 100644 --- a/src/mixer/app.py +++ b/src/mixer/app.py @@ -15,6 +15,7 @@ import hjson import logging import os +from typing import Final # Note: This is the only implementation, currently. 
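For reference on the pyproject.toml change earlier in this patch: the compatible-release operator is kept, but adding a third version component narrows each constraint considerably. "fastapi~=0.115" permits any 0.x release at or above 0.115, whereas "fastapi~=0.115.13" permits only 0.115.x releases at or above 0.115.13. A standalone illustration of the specifier semantics (the packaging library used here is not a project dependency; it is only for demonstration):

    from packaging.specifiers import SpecifierSet

    loose = SpecifierSet("~=0.115")     # equivalent to >= 0.115, == 0.*
    tight = SpecifierSet("~=0.115.13")  # equivalent to >= 0.115.13, == 0.115.*

    print("0.116.0" in loose)   # True: a new minor release still satisfies the loose pin
    print("0.116.0" in tight)   # False: the tightened pin rejects it
    print("0.115.14" in tight)  # True: patch releases remain allowed
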
@@ -23,10 +24,13 @@ logger = logging.getLogger(__name__) +CONFIGURATION_FILEPATH_ENV_VAR: Final[str] = "MCSTRACK_MIXER_CONFIGURATION_FILEPATH" + def create_app() -> FastAPI: configuration_filepath: str = os.path.join( - os.path.dirname(__file__), "..", "..", "data", "configuration", "mixer_config.json") + os.path.dirname(__file__), "..", "..", "data", "configuration", "mixer", "aruco.json") + configuration_filepath = os.getenv(CONFIGURATION_FILEPATH_ENV_VAR, configuration_filepath) configuration: Mixer.Configuration with open(configuration_filepath, 'r') as infile: file_contents: str = infile.read() From f2550bfc1a5799720ac5a4ea801499aec26be991 Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 18 Dec 2025 12:06:53 -0500 Subject: [PATCH 32/33] BUG: Further fixes to detector configuration in image --- data/configuration/detector/rpicam_aruco.json | 6 ++---- pi-gen-detector/stage6/00_custom/02-run-chroot.sh | 5 +---- pi-gen-detector/stage6/00_custom/files/mcstrack_startup | 1 + 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/data/configuration/detector/rpicam_aruco.json b/data/configuration/detector/rpicam_aruco.json index 5bf4c83..8cdd31b 100644 --- a/data/configuration/detector/rpicam_aruco.json +++ b/data/configuration/detector/rpicam_aruco.json @@ -7,10 +7,8 @@ } }, "camera": { - "implementation": "opencv_capture_device", - "configuration": { - "capture_device": 0 - } + "implementation": "picamera2", + "configuration": {} }, "annotator": { "implementation": "aruco_opencv", diff --git a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh index bd44af6..da3cfe2 100755 --- a/pi-gen-detector/stage6/00_custom/02-run-chroot.sh +++ b/pi-gen-detector/stage6/00_custom/02-run-chroot.sh @@ -2,9 +2,6 @@ sudo apt update sudo apt upgrade -y -# Environment variables -export MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH="/home/admin/MCSTrack/data/configuration/detector/rpicam_aruco.json" - # Firewall setup sudo ufw enable sudo ufw allow 8001 @@ -17,4 +14,4 @@ pip install --break-system-packages .[component] deactivate # Run startup script on boot -sudo echo "@reboot root /usr/local/bin/mcstrack_startup >> mcstrack_log.log" > /etc/cron.d/startup +sudo echo "@reboot root /usr/local/bin/mcstrack_startup >> /home/admin/mcstrack_log.log" > /etc/cron.d/startup diff --git a/pi-gen-detector/stage6/00_custom/files/mcstrack_startup b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup index 61aaee4..595d1b9 100644 --- a/pi-gen-detector/stage6/00_custom/files/mcstrack_startup +++ b/pi-gen-detector/stage6/00_custom/files/mcstrack_startup @@ -1,4 +1,5 @@ #!/bin/bash cd /home/admin/MCSTrack source .venv/bin/activate +export MCSTRACK_DETECTOR_CONFIGURATION_FILEPATH="/home/admin/MCSTrack/data/configuration/detector/rpicam_aruco.json" python -m src.main_detector From 9a0dc92d1ed556e8d3f2b1aca6a5d101da45e4fe Mon Sep 17 00:00:00 2001 From: Thomas Vaughan Date: Thu, 8 Jan 2026 14:21:48 -0500 Subject: [PATCH 33/33] BUG: Corrections based on testing --- .../controller_calibration_config.json | 384 ------------------ ...or_lone_local.json => detector_local.json} | 0 data/configuration/controller/dual_pi.json | 203 +++++++++ data/configuration/mixer_config.json | 2 +- src/common/image_processing.py | 4 +- src/common/math.py | 14 +- src/common/pose_solver.py | 26 +- src/controller/mct_controller.py | 8 +- src/gui/panels/detector_panel.py | 1 + src/gui/panels/extrinsics_panel.py | 50 ++- src/gui/panels/pose_solver_panel.py | 4 +- .../extrinsic_charuco_opencv.py 
| 28 +- src/mixer/app.py | 8 +- 13 files changed, 304 insertions(+), 428 deletions(-) delete mode 100644 data/configuration/controller/controller_calibration_config.json rename data/configuration/controller/{controller_detector_lone_local.json => detector_local.json} (100%) create mode 100644 data/configuration/controller/dual_pi.json diff --git a/data/configuration/controller/controller_calibration_config.json b/data/configuration/controller/controller_calibration_config.json deleted file mode 100644 index a20d4f7..0000000 --- a/data/configuration/controller/controller_calibration_config.json +++ /dev/null @@ -1,384 +0,0 @@ -{ - "startup_mode": "detecting_and_solving", - "detectors": [ - { - "label": "det1", - "ip_address": "127.0.0.1", - "port": 8001 - } - ], - "mixers": [ - { - "label": "sol", - "ip_address": "127.0.0.1", - "port": 8000, - "targets": [ - { - "target_id": "reference", - "markers": [ - { - "marker_id": "0", - "points": [ - [-55.0,95.0,0.0], - [-45.0,95.0,0.0], - [-45.0,85.0,0.0], - [-55.0,85.0,0.0] - ] - }, - { - "marker_id": "1", - "points": [ - [-15.0,95.0,0.0], - [-5.0,95.0,0.0], - [-5.0,85.0,0.0], - [-15.0,85.0,0.0] - ] - }, - { - "marker_id": "2", - "points": [ - [25.0,95.0,0.0], - [35.0,95.0,0.0], - [35.0,85.0,0.0], - [25.0,85.0,0.0] - ] - }, - { - "marker_id": "3", - "points": [ - [65.0,95.0,0.0], - [75.0,95.0,0.0], - [75.0,85.0,0.0], - [65.0,85.0,0.0] - ] - }, - { - "marker_id": "4", - "points": [ - [-75.0,75.0,0.0], - [-65.0,75.0,0.0], - [-65.0,65.0,0.0], - [-75.0,65.0,0.0] - ] - }, - { - "marker_id": "5", - "points": [ - [-35.0,75.0,0.0], - [-25.0,75.0,0.0], - [-25.0,65.0,0.0], - [-35.0,65.0,0.0] - ] - }, - { - "marker_id": "6", - "points": [ - [5.0,75.0,0.0], - [15.0,75.0,0.0], - [15.0,65.0,0.0], - [5.0,65.0,0.0] - ] - }, - { - "marker_id": "7", - "points": [ - [45.0,75.0,0.0], - [55.0,75.0,0.0], - [55.0,65.0,0.0], - [45.0,65.0,0.0] - ] - }, - { - "marker_id": "8", - "points": [ - [-55.0,55.0,0.0], - [-45.0,55.0,0.0], - [-45.0,45.0,0.0], - [-55.0,45.0,0.0] - ] - }, - { - "marker_id": "9", - "points": [ - [-15.0,55.0,0.0], - [-5.0,55.0,0.0], - [-5.0,45.0,0.0], - [-15.0,45.0,0.0] - ] - }, - { - "marker_id": "10", - "points": [ - [25.0,55.0,0.0], - [35.0,55.0,0.0], - [35.0,45.0,0.0], - [25.0,45.0,0.0] - ] - }, - { - "marker_id": "11", - "points": [ - [65.0,55.0,0.0], - [75.0,55.0,0.0], - [75.0,45.0,0.0], - [65.0,45.0,0.0] - ] - }, - { - "marker_id": "12", - "points": [ - [-75.0,35.0,0.0], - [-65.0,35.0,0.0], - [-65.0,25.0,0.0], - [-75.0,25.0,0.0] - ] - }, - { - "marker_id": "13", - "points": [ - [-35.0,35.0,0.0], - [-25.0,35.0,0.0], - [-25.0,25.0,0.0], - [-35.0,25.0,0.0] - ] - }, - { - "marker_id": "14", - "points": [ - [5.0,35.0,0.0], - [15.0,35.0,0.0], - [15.0,25.0,0.0], - [5.0,25.0,0.0] - ] - }, - { - "marker_id": "15", - "points": [ - [45.0,35.0,0.0], - [55.0,35.0,0.0], - [55.0,25.0,0.0], - [45.0,25.0,0.0] - ] - }, - { - "marker_id": "16", - "points": [ - [-55.0,15.0,0.0], - [-45.0,15.0,0.0], - [-45.0,5.0,0.0], - [-55.0,5.0,0.0] - ] - }, - { - "marker_id": "17", - "points": [ - [-15.0,15.0,0.0], - [-5.0,15.0,0.0], - [-5.0,5.0,0.0], - [-15.0,5.0,0.0] - ] - }, - { - "marker_id": "18", - "points": [ - [25.0,15.0,0.0], - [35.0,15.0,0.0], - [35.0,5.0,0.0], - [25.0,5.0,0.0] - ] - }, - { - "marker_id": "19", - "points": [ - [65.0,15.0,0.0], - [75.0,15.0,0.0], - [75.0,5.0,0.0], - [65.0,5.0,0.0] - ] - }, - { - "marker_id": "20", - "points": [ - [-75.0,-5.0,0.0], - [-65.0,-5.0,0.0], - [-65.0,-15.0,0.0], - [-75.0,-15.0,0.0] - ] - }, - { - "marker_id": "21", - "points": [ 
- [-35.0,-5.0,0.0], - [-25.0,-5.0,0.0], - [-25.0,-15.0,0.0], - [-35.0,-15.0,0.0] - ] - }, - { - "marker_id": "22", - "points": [ - [5.0,-5.0,0.0], - [15.0,-5.0,0.0], - [15.0,-15.0,0.0], - [5.0,-15.0,0.0] - ] - }, - { - "marker_id": "23", - "points": [ - [45.0,-5.0,0.0], - [55.0,-5.0,0.0], - [55.0,-15.0,0.0], - [45.0,-15.0,0.0] - ] - }, - { - "marker_id": "24", - "points": [ - [-55.0,-25.0,0.0], - [-45.0,-25.0,0.0], - [-45.0,-35.0,0.0], - [-55.0,-35.0,0.0] - ] - }, - { - "marker_id": "25", - "points": [ - [-15.0,-25.0,0.0], - [-5.0,-25.0,0.0], - [-5.0,-35.0,0.0], - [-15.0,-35.0,0.0] - ] - }, - { - "marker_id": "26", - "points": [ - [25.0,-25.0,0.0], - [35.0,-25.0,0.0], - [35.0,-35.0,0.0], - [25.0,-35.0,0.0] - ] - }, - { - "marker_id": "27", - "points": [ - [65.0,-25.0,0.0], - [75.0,-25.0,0.0], - [75.0,-35.0,0.0], - [65.0,-35.0,0.0] - ] - }, - { - "marker_id": "28", - "points": [ - [-75.0,-45.0,0.0], - [-65.0,-45.0,0.0], - [-65.0,-55.0,0.0], - [-75.0,-55.0,0.0] - ] - }, - { - "marker_id": "29", - "points": [ - [-35.0,-45.0,0.0], - [-25.0,-45.0,0.0], - [-25.0,-55.0,0.0], - [-35.0,-55.0,0.0] - ] - }, - { - "marker_id": "30", - "points": [ - [5.0,-45.0,0.0], - [15.0,-45.0,0.0], - [15.0,-55.0,0.0], - [5.0,-55.0,0.0] - ] - }, - { - "marker_id": "31", - "points": [ - [45.0,-45.0,0.0], - [55.0,-45.0,0.0], - [55.0,-55.0,0.0], - [45.0,-55.0,0.0] - ] - }, - { - "marker_id": "32", - "points": [ - [-55.0,-65.0,0.0], - [-45.0,-65.0,0.0], - [-45.0,-75.0,0.0], - [-55.0,-75.0,0.0] - ] - }, - { - "marker_id": "33", - "points": [ - [-15.0,-65.0,0.0], - [-5.0,-65.0,0.0], - [-5.0,-75.0,0.0], - [-15.0,-75.0,0.0] - ] - }, - { - "marker_id": "34", - "points": [ - [25.0,-65.0,0.0], - [35.0,-65.0,0.0], - [35.0,-75.0,0.0], - [25.0,-75.0,0.0] - ] - }, - { - "marker_id": "35", - "points": [ - [65.0,-65.0,0.0], - [75.0,-65.0,0.0], - [75.0,-75.0,0.0], - [65.0,-75.0,0.0] - ] - }, - { - "marker_id": "36", - "points": [ - [-75.0,-85.0,0.0], - [-65.0,-85.0,0.0], - [-65.0,-95.0,0.0], - [-75.0,-95.0,0.0] - ] - }, - { - "marker_id": "37", - "points": [ - [-35.0,-85.0,0.0], - [-25.0,-85.0,0.0], - [-25.0,-95.0,0.0], - [-35.0,-95.0,0.0] - ] - }, - { - "marker_id": "38", - "points": [ - [5.0,-85.0,0.0], - [15.0,-85.0,0.0], - [15.0,-95.0,0.0], - [5.0,-95.0,0.0] - ] - }, - { - "marker_id": "39", - "points": [ - [45.0,-85.0,0.0], - [55.0,-85.0,0.0], - [55.0,-95.0,0.0], - [45.0,-95.0,0.0] - ] - } - ] - } - ] - } - ] -} diff --git a/data/configuration/controller/controller_detector_lone_local.json b/data/configuration/controller/detector_local.json similarity index 100% rename from data/configuration/controller/controller_detector_lone_local.json rename to data/configuration/controller/detector_local.json diff --git a/data/configuration/controller/dual_pi.json b/data/configuration/controller/dual_pi.json new file mode 100644 index 0000000..8cecafb --- /dev/null +++ b/data/configuration/controller/dual_pi.json @@ -0,0 +1,203 @@ +{ + "startup_mode": "detecting_and_solving", + "detectors": [ + { + "label": "d101", + "ip_address": "192.168.0.101", + "port": 8001, + "camera_parameters": [], + "marker_parameters": [ + { + "key": "cornerRefinementMethod", + "value": "SUBPIX" + } + ] + }, + { + "label": "d102", + "ip_address": "192.168.0.102", + "port": 8001, + "camera_parameters": [], + "marker_parameters": [ + { + "key": "cornerRefinementMethod", + "value": "SUBPIX" + } + ] + } + ], + "mixers": [ + { + "label": "sol", + "ip_address": "192.168.0.103", + "port": 8000, + "targets": [ + { + "label": "reference", + "landmarks": [ + { 
"feature_label": "0$0", "x": -55.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "0$1", "x": -45.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "0$2", "x": -45.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "0$3", "x": -55.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "1$0", "x": -15.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "1$1", "x": -5.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "1$2", "x": -5.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "1$3", "x": -15.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "2$0", "x": 25.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "2$1", "x": 35.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "2$2", "x": 35.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "2$3", "x": 25.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "3$0", "x": 65.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "3$1", "x": 75.0, "y": 95.0, "z": 0.0 }, + { "feature_label": "3$2", "x": 75.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "3$3", "x": 65.0, "y": 85.0, "z": 0.0 }, + { "feature_label": "4$0", "x": -75.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "4$1", "x": -65.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "4$2", "x": -65.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "4$3", "x": -75.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "5$0", "x": -35.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "5$1", "x": -25.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "5$2", "x": -25.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "5$3", "x": -35.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "6$0", "x": 5.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "6$1", "x": 15.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "6$2", "x": 15.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "6$3", "x": 5.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "7$0", "x": 45.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "7$1", "x": 55.0, "y": 75.0, "z": 0.0 }, + { "feature_label": "7$2", "x": 55.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "7$3", "x": 45.0, "y": 65.0, "z": 0.0 }, + { "feature_label": "8$0", "x": -55.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "8$1", "x": -45.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "8$2", "x": -45.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "8$3", "x": -55.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "9$0", "x": -15.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "9$1", "x": -5.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "9$2", "x": -5.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "9$3", "x": -15.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "10$0", "x": 25.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "10$1", "x": 35.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "10$2", "x": 35.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "10$3", "x": 25.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "11$0", "x": 65.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "11$1", "x": 75.0, "y": 55.0, "z": 0.0 }, + { "feature_label": "11$2", "x": 75.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "11$3", "x": 65.0, "y": 45.0, "z": 0.0 }, + { "feature_label": "12$0", "x": -75.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "12$1", "x": -65.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "12$2", "x": -65.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "12$3", "x": -75.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "13$0", "x": -35.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "13$1", "x": -25.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "13$2", "x": -25.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "13$3", "x": -35.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "14$0", "x": 5.0, "y": 
35.0, "z": 0.0 }, + { "feature_label": "14$1", "x": 15.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "14$2", "x": 15.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "14$3", "x": 5.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "15$0", "x": 45.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "15$1", "x": 55.0, "y": 35.0, "z": 0.0 }, + { "feature_label": "15$2", "x": 55.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "15$3", "x": 45.0, "y": 25.0, "z": 0.0 }, + { "feature_label": "16$0", "x": -55.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "16$1", "x": -45.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "16$2", "x": -45.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "16$3", "x": -55.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "17$0", "x": -15.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "17$1", "x": -5.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "17$2", "x": -5.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "17$3", "x": -15.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "18$0", "x": 25.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "18$1", "x": 35.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "18$2", "x": 35.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "18$3", "x": 25.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "19$0", "x": 65.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "19$1", "x": 75.0, "y": 15.0, "z": 0.0 }, + { "feature_label": "19$2", "x": 75.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "19$3", "x": 65.0, "y": 5.0, "z": 0.0 }, + { "feature_label": "20$0", "x": -75.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "20$1", "x": -65.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "20$2", "x": -65.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "20$3", "x": -75.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "21$0", "x": -35.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "21$1", "x": -25.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "21$2", "x": -25.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "21$3", "x": -35.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "22$0", "x": 5.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "22$1", "x": 15.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "22$2", "x": 15.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "22$3", "x": 5.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "23$0", "x": 45.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "23$1", "x": 55.0, "y": -5.0, "z": 0.0 }, + { "feature_label": "23$2", "x": 55.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "23$3", "x": 45.0, "y": -15.0, "z": 0.0 }, + { "feature_label": "24$0", "x": -55.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "24$1", "x": -45.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "24$2", "x": -45.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "24$3", "x": -55.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "25$0", "x": -15.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "25$1", "x": -5.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "25$2", "x": -5.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "25$3", "x": -15.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "26$0", "x": 25.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "26$1", "x": 35.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "26$2", "x": 35.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "26$3", "x": 25.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "27$0", "x": 65.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "27$1", "x": 75.0, "y": -25.0, "z": 0.0 }, + { "feature_label": "27$2", "x": 75.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "27$3", "x": 65.0, "y": -35.0, "z": 0.0 }, + { "feature_label": "28$0", "x": 
-75.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "28$1", "x": -65.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "28$2", "x": -65.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "28$3", "x": -75.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "29$0", "x": -35.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "29$1", "x": -25.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "29$2", "x": -25.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "29$3", "x": -35.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "30$0", "x": 5.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "30$1", "x": 15.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "30$2", "x": 15.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "30$3", "x": 5.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "31$0", "x": 45.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "31$1", "x": 55.0, "y": -45.0, "z": 0.0 }, + { "feature_label": "31$2", "x": 55.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "31$3", "x": 45.0, "y": -55.0, "z": 0.0 }, + { "feature_label": "32$0", "x": -55.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "32$1", "x": -45.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "32$2", "x": -45.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "32$3", "x": -55.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "33$0", "x": -15.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "33$1", "x": -5.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "33$2", "x": -5.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "33$3", "x": -15.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "34$0", "x": 25.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "34$1", "x": 35.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "34$2", "x": 35.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "34$3", "x": 25.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "35$0", "x": 65.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "35$1", "x": 75.0, "y": -65.0, "z": 0.0 }, + { "feature_label": "35$2", "x": 75.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "35$3", "x": 65.0, "y": -75.0, "z": 0.0 }, + { "feature_label": "36$0", "x": -75.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "36$1", "x": -65.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "36$2", "x": -65.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "36$3", "x": -75.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "37$0", "x": -35.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "37$1", "x": -25.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "37$2", "x": -25.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "37$3", "x": -35.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "38$0", "x": 5.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "38$1", "x": 15.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "38$2", "x": 15.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "38$3", "x": 5.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "39$0", "x": 45.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "39$1", "x": 55.0, "y": -85.0, "z": 0.0 }, + { "feature_label": "39$2", "x": 55.0, "y": -95.0, "z": 0.0 }, + { "feature_label": "39$3", "x": 45.0, "y": -95.0, "z": 0.0 } + ] + } + ] + } + ] +} diff --git a/data/configuration/mixer_config.json b/data/configuration/mixer_config.json index dfae5df..85ef7d2 100644 --- a/data/configuration/mixer_config.json +++ b/data/configuration/mixer_config.json @@ -3,7 +3,7 @@ "extrinsic_calibrator": { "implementation": "charuco_opencv", "configuration": { - "data_path": "./calibration/intrinsic/" + "data_path": "./calibration/extrinsic/" } } } diff --git a/src/common/image_processing.py b/src/common/image_processing.py index 
cf1aa69..2d1fb46 100644 --- a/src/common/image_processing.py +++ b/src/common/image_processing.py @@ -196,10 +196,10 @@ def partition_rect( if cell_count >= partition_count: break width_px: int = available_size_px[0] // width_cells - height_px: int = available_size_px[1] // width_cells + height_px: int = available_size_px[1] // height_cells positions_px: list[tuple[int, int]] = list() for cell_index in range(0, partition_count): - y_cell = cell_index // height_cells + y_cell = cell_index // width_cells x_cell = cell_index % width_cells y_px = y_cell * height_px x_px = x_cell * width_px diff --git a/src/common/math.py b/src/common/math.py index 8f59cdf..6b732a6 100644 --- a/src/common/math.py +++ b/src/common/math.py @@ -454,7 +454,7 @@ def convert_detector_points_to_vectors( """ distorted_points: numpy.ndarray = numpy.asarray(points) distorted_points = numpy.reshape( - a=distorted_points, + distorted_points, newshape=(1, len(points), 2)) undistorted_points: numpy.ndarray = cv2.undistortPoints( src=distorted_points, @@ -527,7 +527,13 @@ def estimate_matrix_transform_to_detector( annotations: list[Annotation], landmarks: list[Landmark], detector_intrinsics: IntrinsicParameters - ) -> Matrix4x4: + ) -> tuple[bool, Matrix4x4 | None]: + """ + returns: Tuple containing: + 0: 'estimated' bool indicating whether it was possible to make an estimate. + Could be false if e.g. no annotations correspond to the provided landmarks + 1: If 'estimated' is True, the matrix transform that was calculated. Otherwise None. + """ target_points: list[list[float]] = list() # ordered points [point_index][x/y/z] detector_points: list[list[float]] = list() # ordered points [point_index][x/y] annotations_dict: dict[str, Annotation] = {annotation.feature_label: annotation for annotation in annotations} @@ -536,6 +542,8 @@ def estimate_matrix_transform_to_detector( annotation = annotations_dict[landmark.feature_label] target_points.append([landmark.x, landmark.y, landmark.z]) detector_points.append([annotation.x_px, annotation.y_px]) + if len(detector_points) <= 4: + return False, None rotation_vector: numpy.ndarray translation_vector: numpy.ndarray _, rotation_vector, translation_vector = cv2.solvePnP( @@ -726,7 +734,7 @@ def register_corresponding_points( collinearity_do_check: bool = True, collinearity_zero_threshold: float = 0.0001, use_oomori_mirror_fix: bool = True - ) -> numpy.array: # 4x4 transformation matrix, indexed by [row,col] + ) -> numpy.ndarray: # 4x4 transformation matrix, indexed by [row,col] """ Solution based on: Arun et al. 
Least square fitting of two 3D point sets (1987) https://stackoverflow.com/questions/66923224/rigid-registration-of-two-point-clouds-with-known-correspondence diff --git a/src/common/pose_solver.py b/src/common/pose_solver.py index 34232da..dfdbf6f 100644 --- a/src/common/pose_solver.py +++ b/src/common/pose_solver.py @@ -431,13 +431,16 @@ def update(self) -> None: self._poses_by_detector_label[detector_label] = self._extrinsics_by_detector_label[detector_label] else: intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - reference_to_detector: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + estimated: bool + reference_to_detector: Matrix4x4 | None + estimated, reference_to_detector = MathUtils.estimate_matrix_transform_to_detector( annotations=annotation_list_by_detector_label[detector_label], landmarks=reference_target.landmarks, detector_intrinsics=intrinsics) - detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( - numpy.linalg.inv(reference_to_detector.as_numpy_array())) - self._poses_by_detector_label[detector_label] = detector_to_reference + if estimated: + detector_to_reference: Matrix4x4 = Matrix4x4.from_numpy_array( + numpy.linalg.inv(reference_to_detector.as_numpy_array())) + self._poses_by_detector_label[detector_label] = detector_to_reference # At the time of writing, each feature label can be used only by one target. # So we can remove annotations whose feature labels match those of the reference_target @@ -454,6 +457,8 @@ def update(self) -> None: # Convert annotations to rays rays_by_feature_and_detector: dict[str, dict[str, Ray]] = dict() # indexed as [feature_label][detector_label] for detector_label in detector_labels: + if detector_label not in self._poses_by_detector_label: + continue annotations: list[Annotation] = annotation_list_by_detector_label[detector_label] annotation_points: list[list[float]] = [[annotation.x_px, annotation.y_px] for annotation in annotations] detector_to_reference: Matrix4x4 = self._poses_by_detector_label[detector_label] @@ -516,14 +521,17 @@ def update(self) -> None: # Note: there cannot be any intersections in this case detector_label: str = next(iter(detector_labels_seeing_target)) intrinsics: IntrinsicParameters = self._intrinsics_by_detector_label[detector_label] - detected_to_detector_matrix4x4: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + estimated: bool + detected_to_detector_matrix4x4: Matrix4x4 + estimated, detected_to_detector_matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( annotations=annotation_list_by_detector_label[detector_label], landmarks=target.landmarks, detector_intrinsics=intrinsics) - detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() - detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() - detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector - self._poses_by_target_label[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) + if estimated: + detected_to_detector: numpy.ndarray = detected_to_detector_matrix4x4.as_numpy_array() + detector_to_reference: numpy.ndarray = self._poses_by_detector_label[detector_label].as_numpy_array() + detected_to_reference: numpy.ndarray = detector_to_reference @ detected_to_detector + self._poses_by_target_label[target.label] = Matrix4x4.from_numpy_array(detected_to_reference) else: # Fill in the required variables for the customized iterative closest point 
detected_known_points: list[list[float]] = [ diff --git a/src/controller/mct_controller.py b/src/controller/mct_controller.py index 2767907..bcd66ed 100644 --- a/src/controller/mct_controller.py +++ b/src/controller/mct_controller.py @@ -152,7 +152,7 @@ def is_valid_ip_address(connection: MCTComponentConfig) -> bool: continue component_address: Connection.ComponentAddress = Connection.ComponentAddress( label=pose_solver.label, - role="pose_solver", + role="mixer", ip_address=IPv4Address(pose_solver.ip_address), port=pose_solver.port) pose_solver_connection: PoseSolverConnection = self.add_connection(component_address=component_address) @@ -240,7 +240,7 @@ def _advance_startup_state(self) -> None: self._startup_state = MCTController.StartupState.INITIAL self._status = MCTController.Status.RUNNING # We're done else: - pose_solver_labels: list[str] = self.get_active_pose_solver_labels() + pose_solver_labels: list[str] = self.get_active_mixer_labels() for pose_solver_label in pose_solver_labels: requests: list[MCTRequest] = list() for detector_label in self.get_active_detector_labels(): @@ -281,7 +281,7 @@ def get_active_detector_labels(self) -> list[str]: """ return self.get_component_labels(role=Detector.get_role_label(), active=True) - def get_active_pose_solver_labels(self) -> list[str]: + def get_active_mixer_labels(self) -> list[str]: """ See get_component_labels. """ @@ -773,7 +773,7 @@ def update( detector_connection.request_id = self.request_series_push( connection_label=detector_label, request_series=MCTRequestSeries(series=[DetectorFrameGetRequest()])) - for pose_solver_label in self.get_active_pose_solver_labels(): + for pose_solver_label in self.get_active_mixer_labels(): pose_solver_connection: PoseSolverConnection = self._get_connection( connection_label=pose_solver_label, connection_type=PoseSolverConnection) diff --git a/src/gui/panels/detector_panel.py b/src/gui/panels/detector_panel.py index 19599a8..c93b2c7 100644 --- a/src/gui/panels/detector_panel.py +++ b/src/gui/panels/detector_panel.py @@ -534,6 +534,7 @@ def _update_ui_image(self): display_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=opencv_image, available_size=self._image_panel.GetSize()) + cv2.cvtColor(display_image, cv2.COLOR_RGB2BGR, display_image) scale: float = self._preview_scale_factor.get_value() * display_image.shape[0] / opencv_image.shape[0] else: display_image = ImageUtils.black_image(resolution_px=self._image_panel.GetSize()) diff --git a/src/gui/panels/extrinsics_panel.py b/src/gui/panels/extrinsics_panel.py index 503c007..07ca415 100644 --- a/src/gui/panels/extrinsics_panel.py +++ b/src/gui/panels/extrinsics_panel.py @@ -13,6 +13,7 @@ EmptyResponse, \ ExtrinsicCalibrator, \ ImageFormat, \ + ImageResolution, \ ImageUtils, \ IntrinsicCalibrator, \ MCTRequestSeries, \ @@ -54,6 +55,7 @@ logger = logging.getLogger(__name__) +_PREVIEW_CAPTURE_FORMAT: ImageFormat = ImageFormat.FORMAT_JPG class ExtrinsicsPanel(BasePanel): @@ -79,7 +81,7 @@ class ExtrinsicsPanel(BasePanel): _control_blocking_request_ids: set[uuid.UUID] _is_updating: bool # Some things should only trigger during explicit user events - _preview_request_ids: set[uuid.UUID] + _preview_request_ids_by_detector_label: dict[str, uuid.UUID] _preview_images_by_detector_label: dict[str, numpy.ndarray] _extrinsic_image: numpy.ndarray | None _current_capture_timestamp: datetime.datetime | None # None indicates no capture in progress @@ -102,7 +104,7 @@ def __init__( self._control_blocking_request_ids = set() self._is_updating = False 
- self._preview_request_ids = set() + self._preview_request_ids_by_detector_label = dict() self._preview_images_by_detector_label = dict() self._extrinsic_image = None self._current_capture_timestamp = None @@ -323,6 +325,8 @@ def handle_response_series( def on_page_select(self) -> None: super().on_page_select() + available_mixer_labels: list[str] = self._controller.get_active_mixer_labels() + self._mixer_selector.set_options(option_list=available_mixer_labels) self._update_ui_controls() def update_loop(self) -> None: @@ -330,23 +334,43 @@ def update_loop(self) -> None: self._is_updating = True response_series: MCTResponseSeries | None + responded_request_ids: list[tuple[uuid.UUID, MCTResponseSeries]] = list() for request_id in self._control_blocking_request_ids: _, response_series = self._controller.response_series_pop(request_series_id=request_id) if response_series is not None: + responded_request_ids.append((request_id, response_series)) + if len(responded_request_ids) > 0: + # Clean up the request id list first + for request_id, response_series in responded_request_ids: self._control_blocking_request_ids.remove(request_id) + # THEN handle the responses. This order of ops is assumed in calibration handling. + for request_id, response_series in responded_request_ids: self.handle_response_series(response_series) - self._update_ui_controls() + self._update_ui_controls() if self._preview_toggle_button.GetValue(): - for request_id in self._preview_request_ids: + detector_labels_with_responses: set[str] = set() + for detector_label, request_id in self._preview_request_ids_by_detector_label.items(): _, response_series = self._controller.response_series_pop(request_series_id=request_id) if response_series is not None and \ len(response_series.series) > 0 and \ isinstance(response_series.series[0], CameraImageGetResponse): response: CameraImageGetResponse = response_series.series[0] - detector_label: str = response_series.responder self._preview_images_by_detector_label[detector_label] = \ ImageUtils.base64_to_image(response.image_base64) + detector_labels_with_responses.add(detector_label) + detector_labels: list[str] = self._controller.get_active_detector_labels() + for detector_label in detector_labels: + if detector_label in detector_labels_with_responses or \ + detector_label not in self._preview_request_ids_by_detector_label: + request_series: MCTRequestSeries = MCTRequestSeries( + series=[CameraImageGetRequest( + format=_PREVIEW_CAPTURE_FORMAT, + requested_resolution=ImageResolution(x_px=800, y_px=480))]) # TODO: Parameterize + preview_request_id = self._controller.request_series_push( + connection_label=detector_label, + request_series=request_series) + self._preview_request_ids_by_detector_label[detector_label] = preview_request_id self._update_ui_image() @@ -633,25 +657,27 @@ def _update_ui_image(self): image_dimensions, image_positions = ImageUtils.partition_rect( available_size_px=available_size_px, partition_count=len(detector_labels)) - for detector_label, detector_index in enumerate(detector_labels): + for detector_index, detector_label in enumerate(detector_labels): if detector_label in self._preview_images_by_detector_label: detector_image: numpy.ndarray = self._preview_images_by_detector_label[detector_label] detector_image = ImageUtils.image_resize_to_fit( opencv_image=detector_image, available_size=image_dimensions) + offset_y_px: int = image_positions[detector_index][1] + (image_dimensions[1] - detector_image.shape[0]) // 2 + offset_x_px: int = 
image_positions[detector_index][0] + (image_dimensions[0] - detector_image.shape[1]) // 2 display_image[ - image_positions[detector_index][0]:image_positions[detector_index][0] + image_dimensions[0], - image_positions[detector_index][1]:image_positions[detector_index][1] + image_dimensions[1] + offset_y_px:offset_y_px + detector_image.shape[0], + offset_x_px:offset_x_px + detector_image.shape[1] ] = detector_image elif self._extrinsic_image is not None: extrinsic_image: numpy.ndarray = ImageUtils.image_resize_to_fit( opencv_image=self._extrinsic_image, available_size=available_size_px) - offset_x_px: int = (display_image.shape[0] - self._extrinsic_image.shape[0]) // 2 - offset_y_px: int = (display_image.shape[1] - self._extrinsic_image.shape[1]) // 2 + offset_y_px: int = (display_image.shape[0] - extrinsic_image.shape[0]) // 2 + offset_x_px: int = (display_image.shape[1] - extrinsic_image.shape[1]) // 2 display_image[ - offset_x_px:offset_x_px + self._extrinsic_image.shape[1], - offset_y_px:offset_y_px + self._extrinsic_image.shape[0], + offset_y_px:offset_y_px + extrinsic_image.shape[0], + offset_x_px:offset_x_px + extrinsic_image.shape[1], ] = extrinsic_image image_buffer: bytes = ImageUtils.image_to_bytes(image_data=display_image, image_format=".jpg") diff --git a/src/gui/panels/pose_solver_panel.py b/src/gui/panels/pose_solver_panel.py index 423fa44..73ecca2 100644 --- a/src/gui/panels/pose_solver_panel.py +++ b/src/gui/panels/pose_solver_panel.py @@ -163,7 +163,7 @@ def handle_response_series( def on_page_select(self) -> None: super().on_page_select() selected_pose_solver_label: str = self._pose_solver_selector.selector.GetStringSelection() - available_pose_solver_labels: list[str] = self._controller.get_active_pose_solver_labels() + available_pose_solver_labels: list[str] = self._controller.get_active_mixer_labels() self._pose_solver_selector.set_options(option_list=available_pose_solver_labels) if selected_pose_solver_label in available_pose_solver_labels: self._pose_solver_selector.selector.SetStringSelection(selected_pose_solver_label) @@ -212,7 +212,7 @@ def update_loop(self) -> None: self._latest_detector_frames[detector_label] = retrieved_detector_frame new_poses_available: bool = False - pose_solver_labels: list[str] = self._controller.get_active_pose_solver_labels() + pose_solver_labels: list[str] = self._controller.get_active_mixer_labels() for pose_solver_label in pose_solver_labels: retrieved_pose_solver_frame: MixerFrame = self._controller.get_live_pose_solver_frame( pose_solver_label=pose_solver_label) diff --git a/src/implementations/extrinsic_charuco_opencv.py b/src/implementations/extrinsic_charuco_opencv.py index 9f86f1b..c5b846a 100644 --- a/src/implementations/extrinsic_charuco_opencv.py +++ b/src/implementations/extrinsic_charuco_opencv.py @@ -197,14 +197,17 @@ def _calculate_implementation( timestamp_utc_iso8601=metadata.timestamp_utc_iso8601, detector_label=metadata.detector_label) intrinsic_parameters: IntrinsicParameters = self.detector_intrinsics_by_label[metadata.detector_label] - reference_to_initial: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + estimated: bool + reference_to_initial: Matrix4x4 + estimated, reference_to_initial = MathUtils.estimate_matrix_transform_to_detector( annotations=image_data.annotations, landmarks=charuco_target.landmarks, detector_intrinsics=intrinsic_parameters) - initial_to_reference: Matrix4x4 = reference_to_initial.inverse() - detector: _DetectorData = 
data.get_detector_container(detector_label=image_data.detector_label) - detector.initial_to_reference = initial_to_reference - detector.refined_to_reference = initial_to_reference + if estimated: + initial_to_reference: Matrix4x4 = reference_to_initial.inverse() + detector: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) + detector.initial_to_reference = initial_to_reference + detector.refined_to_reference = initial_to_reference for i in range(0, self.configuration.termination_iteration_count): # Update each ray based on the current pose @@ -212,7 +215,11 @@ def _calculate_implementation( for image_data in timestamp_data.images: if len(image_data.annotations) == 0: continue - detector_data: _DetectorData = data.get_detector_container(detector_label=image_data.detector_label) + detector_data: _DetectorData + try: + detector_data = data.get_detector_container(detector_label=image_data.detector_label) + except IndexError: + continue # Indicates that it was not possible to estimate the detector pose feature_labels: list[str] = [annotation.feature_label for annotation in image_data.annotations] ray_directions: list[list[float]] = MathUtils.convert_detector_points_to_vectors( points=image_data.annotations_as_points(), @@ -271,10 +278,17 @@ def _calculate_implementation( feature_label=timestamped_feature_label, x_px=annotation.x_px, y_px=annotation.y_px)) - reference_to_refined: Matrix4x4 = MathUtils.estimate_matrix_transform_to_detector( + estimated: bool + reference_to_refined: Matrix4x4 + estimated, reference_to_refined = MathUtils.estimate_matrix_transform_to_detector( annotations=annotations, landmarks=landmarks, detector_intrinsics=detector_data.intrinsic_parameters) + if not estimated: + raise NotImplemented( + "extrinsic_charuco_opencv: A detector pose was unable to get estimated. " + "This is not expected to occur and is not presently handled. " + "If you are seeing this, then please report that you are seeing this message.") refined_to_reference: Matrix4x4 = reference_to_refined.inverse() translation_change: float = numpy.linalg.norm( numpy.asarray(refined_to_reference.get_translation()) - diff --git a/src/mixer/app.py b/src/mixer/app.py index 01e45be..c680dee 100644 --- a/src/mixer/app.py +++ b/src/mixer/app.py @@ -69,12 +69,12 @@ async def set_intrinsic_parameters( ) -> EmptyResponse | ErrorResponse: return mixer.mixer_update_intrinsic_parameters(request=request) - @mixer_app.head("/start_capture") - async def start_capture() -> None: + @mixer_app.head("/start") + async def start() -> None: mixer.mixer_start() - @mixer_app.head("/stop_capture") - async def stop_capture() -> None: + @mixer_app.head("/stop") + async def stop() -> None: mixer.mixer_stop() @mixer_app.websocket("/websocket")
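
The final hunk above renames the mixer's control routes from /start_capture and /stop_capture to /start and /stop, keeping them as HEAD endpoints with no response body. A standalone illustration of the resulting route shape (this snippet builds a throwaway app rather than importing src.mixer.app, so it runs without a mixer configuration file; the pass statements stand in for the real mixer_start()/mixer_stop() calls):

    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()

    @app.head("/start")
    async def start() -> None:
        pass  # the real endpoint calls mixer.mixer_start()

    @app.head("/stop")
    async def stop() -> None:
        pass  # the real endpoint calls mixer.mixer_stop()

    client = TestClient(app)
    assert client.head("/start").status_code == 200
    assert client.head("/stop").status_code == 200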