diff --git a/rpi/assets/models/epoch-200.pt b/rpi/assets/models/epoch-200.pt
new file mode 100644
index 00000000..05dda92c
Binary files /dev/null and b/rpi/assets/models/epoch-200.pt differ
diff --git a/rpi/board-detector/paths.py b/rpi/board-detector/paths.py
index 5a6d5e27..9740a315 100644
--- a/rpi/board-detector/paths.py
+++ b/rpi/board-detector/paths.py
@@ -1,3 +1,3 @@
-model_path = "C:/Users/Laurent/Desktop/board-mate/rpi/assets/models/epoch-130.pt"
+model_path = "C:/Users/Laurent/Desktop/board-mate/rpi/assets/models/epoch-200.pt"
 #img_path = "./test/4.jpg"
 img_path = "../training/datasets/unified/train/images/WIN_20221220_11_27_27_Pro_jpg.rf.4f01cb68c8944ef1c4c7dc57847b4cd3.jpg"
diff --git a/rpi/board-detector/realtime_detect.py b/rpi/board-detector/realtime_detect.py
new file mode 100644
index 00000000..f4341001
--- /dev/null
+++ b/rpi/board-detector/realtime_detect.py
@@ -0,0 +1,45 @@
+from ultralytics import YOLO
+from paths import model_path  # explicit import instead of wildcard; only model_path is used
+import cv2
+
+if __name__ == "__main__":
+
+    print("Initializing model...")
+    model = YOLO(model_path)
+
+    print("Initializing camera...")
+    cap = cv2.VideoCapture(0)
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
+    cap.set(cv2.CAP_PROP_FPS, 30)
+
+    print("Initialized")
+    if not cap.isOpened():
+        print("Error: Could not open camera")
+        exit()
+
+    cv2.namedWindow("Predictions", cv2.WINDOW_NORMAL)
+    # Size the window once up front rather than on every frame of the loop.
+    cv2.resizeWindow("Predictions", 640, 640)
+
+    try:
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                print("Error: Failed to grab frame")
+                break
+
+            # Optional: resize frame to improve YOLO performance
+            # frame = cv2.resize(frame, (416, 416))
+
+            results = model.predict(source=frame, conf=0.5)
+
+            annotated_frame = results[0].plot()  # annotated frame as NumPy array
+
+            cv2.imshow("Predictions", annotated_frame)
+            if cv2.waitKey(1) & 0xFF == ord('q'):
+                break
+    finally:
+        # Always release the camera and close windows, even if predict/plot raises.
+        cap.release()
+        cv2.destroyAllWindows()