"""Helpers to detect faces and predict emotions for multiple people in a frame."""

import torch
import numpy as np


def check_cuda_available():
    """Return the torch device string to use: "cuda" if available, else "cpu".

    Prints a warning when falling back to the CPU, since inference will be slow.
    """
    if torch.cuda.is_available():
        print("Cuda will be used for calculation")
        return "cuda"
    print("Warning: CPU will be used for calculation.")
    print("The calculation may be slow.")
    return "cpu"


def check_camera_available(capture):
    """Abort the program if *capture* (presumably a cv2.VideoCapture) is not open."""
    if not capture.isOpened():
        print("Cannot open camera")
        # Equivalent to the interactive `exit()` builtin, but does not
        # depend on the `site` module being loaded.
        raise SystemExit


def detect_faces(mtcnn, frame):
    """Detect faces in *frame* using an MTCNN detector.

    Only boxes with detection probability > 0.9 are kept.  Returns a
    sequence of [x1, y1, x2, y2] float boxes; empty when nothing is found.
    """
    bounding_boxes, probs = mtcnn.detect(frame, landmarks=False)
    if bounding_boxes is None:
        return []
    return bounding_boxes[probs > 0.9]


def adjust_bounding_boxes(bounding_boxes, x_limit, y_limit):
    """Clamp detector boxes into the frame, returning int [x1, y1, x2, y2] lists.

    Coordinates are truncated to int and ordered so x1 <= x2 and y1 <= y2.
    The low corner is clipped to limit - 2 and the high corner to limit - 1,
    so the later crop frame[y1:y2, x1:x2] is never degenerate at the edge.
    """
    limited_bounding_boxes = []
    for bounding_box in bounding_boxes:
        x1, y1, x2, y2 = bounding_box.astype(int)[0:4]
        x_lo, x_hi = sorted((x1, x2))
        y_lo, y_hi = sorted((y1, y2))
        limited_bounding_boxes.append([
            np.clip(x_lo, 0, x_limit - 2),
            np.clip(y_lo, 0, y_limit - 2),
            np.clip(x_hi, 0, x_limit - 1),
            np.clip(y_hi, 0, y_limit - 1),
        ])
    return limited_bounding_boxes


def predict_emotions_from_faces(emotion_recognizer, frame, bounding_boxes):
    """Crop each box out of *frame* and predict one emotion label per face.

    *bounding_boxes* entries are [x1, y1, x2, y2] ints (see
    adjust_bounding_boxes).  Returns the list of predicted emotions, in the
    same order as the boxes; empty input yields an empty list.
    """
    emotions = []
    for bounding_box in bounding_boxes:
        x1, y1, x2, y2 = bounding_box[0:4]
        face_img = frame[y1:y2, x1:x2, :]
        emotion, _ = emotion_recognizer.predict_emotions(face_img, logits=True)
        emotions.append(emotion)
    return emotions