diff --git a/config.yaml b/config.yaml
index c5a441b9..5d1dd36a 100644
--- a/config.yaml
+++ b/config.yaml
@@ -3,6 +3,7 @@
 queue_max_size: 10

 log_directory_path: "logs"
+profiling_length: 300

 video_input:
   camera_name: 0
@@ -19,3 +20,6 @@ flight_interface:
   address: "tcp:127.0.0.1:14550"
   timeout: 10.0 # seconds
   worker_period: 0.1 # seconds
+
+data_merge:
+  timeout: 10.0 # seconds
diff --git a/main_2024.py b/main_2024.py
index 9dba7564..17d37306 100644
--- a/main_2024.py
+++ b/main_2024.py
@@ -15,6 +15,7 @@
 from modules.detect_target import detect_target_worker
 from modules.flight_interface import flight_interface_worker
 from modules.video_input import video_input_worker
+from modules.data_merge import data_merge_worker
 from utilities.workers import queue_proxy_wrapper
 from utilities.workers import worker_controller
 from utilities.workers import worker_manager
@@ -76,6 +77,8 @@ def main() -> int:
         FLIGHT_INTERFACE_ADDRESS = config["flight_interface"]["address"]
         FLIGHT_INTERFACE_TIMEOUT = config["flight_interface"]["timeout"]
         FLIGHT_INTERFACE_WORKER_PERIOD = config["flight_interface"]["worker_period"]
+
+        DATA_MERGE_TIMEOUT = config["data_merge"]["timeout"]
     except KeyError:
         print("Config key(s) not found")
         return -1
@@ -90,13 +93,17 @@ def main() -> int:
         mp_manager,
         QUEUE_MAX_SIZE,
     )
-    detect_target_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
+    detect_target_to_data_merge_queue = queue_proxy_wrapper.QueueProxyWrapper(
         mp_manager,
         QUEUE_MAX_SIZE,
     )
-    flight_interface_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
+    flight_interface_to_data_merge_queue = queue_proxy_wrapper.QueueProxyWrapper(
         mp_manager,
-        QUEUE_MAX_SIZE
+        QUEUE_MAX_SIZE,
+    )
+    data_merge_to_main_queue = queue_proxy_wrapper.QueueProxyWrapper(
+        mp_manager,
+        QUEUE_MAX_SIZE,
     )

     video_input_manager = worker_manager.WorkerManager()
@@ -123,7 +130,7 @@ def main() -> int:
             DETECT_TARGET_SHOW_ANNOTATED,
             DETECT_TARGET_SAVE_PREFIX,
             video_input_to_detect_target_queue,
-            detect_target_to_main_queue,
+            detect_target_to_data_merge_queue,
             controller,
         ),
     )
@@ -136,7 +143,20 @@ def main() -> int:
             FLIGHT_INTERFACE_ADDRESS,
             FLIGHT_INTERFACE_TIMEOUT,
             FLIGHT_INTERFACE_WORKER_PERIOD,
-            flight_interface_to_main_queue,
+            flight_interface_to_data_merge_queue,
             controller,
         ),
     )
+
+    data_merge_manager = worker_manager.WorkerManager()
+    data_merge_manager.create_workers(
+        1,
+        data_merge_worker.data_merge_worker,
+        (
+            DATA_MERGE_TIMEOUT,
+            detect_target_to_data_merge_queue,
+            flight_interface_to_data_merge_queue,
+            data_merge_to_main_queue,
+            controller,
+        ),
+    )
@@ -145,36 +165,29 @@ def main() -> int:
     video_input_manager.start_workers()
     detect_target_manager.start_workers()
     flight_interface_manager.start_workers()
+    data_merge_manager.start_workers()

     while True:
         try:
-            detections = detect_target_to_main_queue.queue.get_nowait()
+            merged_data = data_merge_to_main_queue.queue.get_nowait()
         except queue.Empty:
-            detections = None
-
-        if detections is not None:
-            print("timestamp: " + str(detections.timestamp))
-            print("detections: " + str(len(detections.detections)))
-            for detection in detections.detections:
-                print(" label: " + str(detection.label))
-                print(" confidence: " + str(detection.confidence))
-            print("")
-
-        odometry_and_time_info: "odometry_and_time.OdometryAndTime | None" = \
-            flight_interface_to_main_queue.queue.get()
-
-        if odometry_and_time_info is not None:
-            timestamp = odometry_and_time_info.timestamp
-            position = odometry_and_time_info.odometry_data.position
-            orientation = odometry_and_time_info.odometry_data.orientation.orientation
-
-            print("timestamp: " + str(timestamp))
-            print("north: " + str(position.north))
-            print("east: " + str(position.east))
-            print("down: " + str(position.down))
-            print("yaw: " + str(orientation.yaw))
-            print("roll: " + str(orientation.roll))
-            print("pitch: " + str(orientation.pitch))
+            merged_data = None
+
+        if merged_data is not None:
+            position = merged_data.odometry_local.position
+            orientation = merged_data.odometry_local.orientation.orientation
+            detections = merged_data.detections
+
+            print("merged north: " + str(position.north))
+            print("merged east: " + str(position.east))
+            print("merged down: " + str(position.down))
+            print("merged yaw: " + str(orientation.yaw))
+            print("merged roll: " + str(orientation.roll))
+            print("merged pitch: " + str(orientation.pitch))
+            print("merged detections count: " + str(len(detections)))
+            for detection in detections:
+                print("merged label: " + str(detection.label))
+                print("merged confidence: " + str(detection.confidence))
             print("")

         if cv2.waitKey(1) == ord('q'):
@@ -185,12 +198,14 @@ def main() -> int:
     controller.request_exit()

     video_input_to_detect_target_queue.fill_and_drain_queue()
-    detect_target_to_main_queue.fill_and_drain_queue()
-    flight_interface_to_main_queue.fill_and_drain_queue()
+    detect_target_to_data_merge_queue.fill_and_drain_queue()
+    flight_interface_to_data_merge_queue.fill_and_drain_queue()
+    data_merge_to_main_queue.fill_and_drain_queue()

     video_input_manager.join_workers()
     detect_target_manager.join_workers()
     flight_interface_manager.join_workers()
+    data_merge_manager.join_workers()

     cv2.destroyAllWindows()

diff --git a/modules/detect_target/detect_target.py b/modules/detect_target/detect_target.py
index e49134e4..f0c3e54a 100644
--- a/modules/detect_target/detect_target.py
+++ b/modules/detect_target/detect_target.py
@@ -54,6 +54,8 @@ def run(self,

         Return: Success and the detections.
         """
+        start_time = time.time()
+
         image = data.image
         predictions = self.__model.predict(
             source=image,
@@ -90,6 +92,22 @@ def run(self,
             assert detection is not None
             detections.append(detection)

+        stop_time = time.time()
+
+        elapsed_time = stop_time - start_time
+
+        for pred in predictions:
+            with open('profiler.txt', 'a') as file:
+                speeds = pred.speed
+                preprocess_speed = round(speeds['preprocess'], 3)
+                inference_speed = round(speeds['inference'], 3)
+                postprocess_speed = round(speeds['postprocess'], 3)
+                elapsed_time_ms = elapsed_time * 1000
+                precision_string = "half" if self.__enable_half_precision else "full"
+
+                file.write(f"{preprocess_speed}, {inference_speed}, {postprocess_speed}, {elapsed_time_ms}, {precision_string}\n")
+
         # Logging
         if self.__filename_prefix != "":
             filename = self.__filename_prefix + str(self.__counter)
diff --git a/profiler/__init__.py b/profiler/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/profiler_detect_target_2024.py b/profiler_detect_target_2024.py
new file mode 100644
index 00000000..f539a186
--- /dev/null
+++ b/profiler_detect_target_2024.py
@@ -0,0 +1,185 @@
+"""
+Profile detect target using full/half precision.
+"""
+import argparse
+import gc
+import os
+import pathlib
+import time
+
+import cv2
+import numpy as np
+import pandas as pd
+import yaml
+
+from modules.detect_target import detect_target
+from modules.image_and_time import ImageAndTime
+
+
+CONFIG_FILE_PATH = pathlib.Path("config.yaml")
+
+GRASS_DATA_DIR = "profiler/profile_data/Grass"
+ASPHALT_DATA_DIR = "profiler/profile_data/Asphalt"
+FIELD_DATA_DIR = "profiler/profile_data/Field"
+
+# Nanoseconds per millisecond, for converting time.time_ns() durations
+MS_TO_NS_CONV = 1000000
+
+
+def load_images(directory: str) -> "list[ImageAndTime]":
+    """
+    Load every PNG in the directory and wrap it in ImageAndTime.
+    """
+    images = []
+    for filename in os.listdir(directory):
+        if filename.endswith(".png"):
+            img = cv2.imread(os.path.join(directory, filename))
+            if img is not None:
+                success, image_with_time = ImageAndTime.create(img)
+                if success:
+                    images.append(image_with_time)
+    return images
+
+
+def profile_detector(detector: detect_target.DetectTarget,
+                     images: "list[ImageAndTime]") -> dict:
+    """
+    Time detector.run() on each image and return summary statistics in milliseconds.
+    """
+    times_arr = []
+    for image in images:
+        gc.disable()  # Disable garbage collection so collection pauses do not skew timing
+        start = time.time_ns()
+        result, _ = detector.run(image)  # Detections are discarded, only the timing is kept
+        end = time.time_ns()
+        gc.enable()  # Re-enable garbage collection
+        if result:
+            times_arr.append(end - start)
+
+    if len(times_arr) > 0:
+        average = np.nanmean(times_arr) / MS_TO_NS_CONV
+        mins = np.nanmin(times_arr) / MS_TO_NS_CONV
+        maxs = np.nanmax(times_arr) / MS_TO_NS_CONV
+        median = np.median(times_arr) / MS_TO_NS_CONV
+    else:
+        average, mins, maxs, median = -1, -1, -1, -1
+
+    data = {
+        "Average (ms)": average,
+        "Min (ms)": mins,
+        "Max (ms)": maxs,
+        "Median (ms)": median,
+    }
+
+    return data
+
+
+def run_detector(detector_full: detect_target.DetectTarget,
+                 detector_half: detect_target.DetectTarget,
+                 images: "list[ImageAndTime]") -> pd.DataFrame:
+    """
+    Profile the full and half precision detectors on the same images and combine the results.
+    """
+    # Initial run just to warm up CUDA
+    _ = profile_detector(detector_full, images[:10])
+    half_data = profile_detector(detector_half, images)
+    full_data = profile_detector(detector_full, images)
+
+    full_df = pd.DataFrame(full_data, index=["full"])
+    half_df = pd.DataFrame(half_data, index=["half"])
+    return pd.concat([half_df, full_df])
+
+
+def main() -> int:
+    # Configuration
+    try:
+        with CONFIG_FILE_PATH.open("r", encoding="utf8") as file:
+            try:
+                config = yaml.safe_load(file)
+            except yaml.YAMLError as exc:
+                print(f"Error parsing YAML file: {exc}")
+                return -1
+    except FileNotFoundError:
+        print(f"File not found: {CONFIG_FILE_PATH}")
+        return -1
+    except IOError as exc:
+        print(f"Error when opening file: {exc}")
+        return -1
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--cpu", action="store_true", help="option to force cpu")
+    args = parser.parse_args()
+
+    DETECT_TARGET_MODEL_PATH = config["detect_target"]["model_path"]
+    DETECT_TARGET_DEVICE = "cpu" if args.cpu else config["detect_target"]["device"]
+
+    # Optional logging parameters
+    LOG_DIRECTORY_PATH = config["log_directory_path"]
+    DETECT_TARGET_SAVE_NAME_PREFIX = config["detect_target"]["save_prefix"]
+    DETECT_TARGET_SAVE_PREFIX = f"{LOG_DIRECTORY_PATH}/{DETECT_TARGET_SAVE_NAME_PREFIX}"
+
+    # Creating detector instances
+    detector_half = detect_target.DetectTarget(
+        DETECT_TARGET_DEVICE,
+        DETECT_TARGET_MODEL_PATH,
+        False,
+        "",  # Not logging images
+    )
+    detector_full = detect_target.DetectTarget(
+        DETECT_TARGET_DEVICE,
+        DETECT_TARGET_MODEL_PATH,
+        True,  # Forces full precision
+        "",  # Not logging images
+    )
+
+    # Loading images
+    grass_images = load_images(GRASS_DATA_DIR)
+    asphalt_images = load_images(ASPHALT_DATA_DIR)
+    field_images = load_images(FIELD_DATA_DIR)
+
+    # Running detector
+    grass_results = run_detector(detector_full, detector_half, grass_images)
+    asphalt_results = run_detector(detector_full, detector_half, asphalt_images)
+    field_results = run_detector(detector_full, detector_half, field_images)
+
+    # Printing results to console
+    print("=================GRASS==================")
+    print(grass_results)
+    print("=================ASPHALT==================")
+    print(asphalt_results)
+    print("=================FIELD==================")
+    print(field_results)
+
+    # Save to CSVs
+    grass_results.to_csv("profiler/profile_data/results/results_grass.csv")
+    asphalt_results.to_csv("profiler/profile_data/results/results_asphalt.csv")
+    field_results.to_csv("profiler/profile_data/results/results_field.csv")
+
+    return 0
+
+
+if __name__ == "__main__":
+    main()
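
Note: the profiling hook added to detect_target.run() appends one comma-separated line per prediction to profiler.txt. The following is a minimal sketch, assuming pandas is available, of how that file could be summarized after a live run; the column names are assumptions, since the file is written without a header.

import pandas as pd

# Assumed column order, matching the f-string written by the hook in detect_target.run()
COLUMNS = ["preprocess_ms", "inference_ms", "postprocess_ms", "elapsed_ms", "precision"]

# Each row of profiler.txt looks like: "1.234, 5.678, 0.9, 12.3, half"
frame_times = pd.read_csv("profiler.txt", names=COLUMNS, skipinitialspace=True)

# Mean per-stage time for each precision mode ("half" or "full")
print(frame_times.groupby("precision").mean(numeric_only=True))

Grouping by the precision column gives a quick full-versus-half comparison from flight data, complementing the per-surface CSVs that profiler_detect_target_2024.py writes.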
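Note: the new data_merge worker is wired in above but its implementation is not part of this diff. As illustration only, and not the actual data_merge_worker code, here is a minimal sketch of the kind of timestamp pairing such a merge step performs; the Stamped class and match_nearest_odometry function are hypothetical placeholders for the real detections and odometry types.

import dataclasses


@dataclasses.dataclass
class Stamped:
    """Hypothetical placeholder for any timestamped message (detections or odometry)."""
    timestamp: float
    payload: object


def match_nearest_odometry(detections: Stamped,
                           odometry_buffer: "list[Stamped]") -> "Stamped | None":
    """
    Return the buffered odometry sample whose timestamp is closest to the detections,
    or None if the buffer is empty.
    """
    if not odometry_buffer:
        return None
    return min(odometry_buffer, key=lambda odometry: abs(odometry.timestamp - detections.timestamp))

The real worker presumably also uses the configured data_merge timeout when one of its input queues runs dry, so the main loop is never blocked indefinitely.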