From d74c58c06ed4c3609bf09abbdebebe837865b0c5 Mon Sep 17 00:00:00 2001
From: "Symeonidis-Herzig, Alexandre (PG/R - Comp Sci & Elec Eng)"
 <as03095@surrey.ac.uk>
Date: Wed, 14 Feb 2024 09:39:17 +0000
Subject: [PATCH] Slightly Flawed Set-Up for First Reprocess

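Point the default annotation and output paths at the L2 set, add a
--flip option that mirrors each frame horizontally before EMOCA
reconstruction, and clean up the copy-pasted argparse help strings and
the malformed model_name choices list.

Example invocation (illustrative; both paths are the new script
defaults):

    python gdl_apps/EMOCA/runners/smile_runner.py --flip \
        -a /vol/research/signFaces/non_manual_annotations/clean_annotations_l2.json \
        -o /vol/research/signFaces/SMILE-II-Reprocessed/L2-Flipped.pkl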
---
 gdl_apps/EMOCA/runners/smile_runner.py | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/gdl_apps/EMOCA/runners/smile_runner.py b/gdl_apps/EMOCA/runners/smile_runner.py
index a9c70752..c8d88b8d 100644
--- a/gdl_apps/EMOCA/runners/smile_runner.py
+++ b/gdl_apps/EMOCA/runners/smile_runner.py
@@ -24,15 +24,17 @@ def parse_args():
                         default=str("/vol/vssp/LF_datasets/multiview/smile-ii/PARTICIPANTS"), 
                         help="Directory with all the SMILE-II Video.")
     parser.add_argument('--annotation_file', '-a' , type=str,
-                        default=str("/vol/research/signFaces/non_manual_annotations/clean_annotations_l1.json"), 
-                        help="Directory with all the SMILE-II Video.")
+                        default=str("/vol/research/signFaces/non_manual_annotations/clean_annotations_l2.json"), 
+                        help="Path to the annotation JSON file.")
     parser.add_argument('--output_file', '-o' , type=str,
-                        default=str("/vol/research/signFaces/SMILE-II-Reprocessed/L1.pkl"), 
+                        default=str("/vol/research/signFaces/SMILE-II-Reprocessed/L2-Flipped.pkl"), 
                         help="Location to save the output file.")
     parser.add_argument('--checkpoint', '-c' , type=str,
                         default=None, 
                         help="Location to save the output file.")
 
+    parser.add_argument('--flip', '-f', action="store_true",
+                        help="Flip the video before processing.")
     parser.add_argument('--model_name', '-m', type=str,
                         default='EMOCA_v2_lr_mse_20', 
                         choices=["EMOCA_v2_lr_mse_20", "EMOCA_v2_lr_cos_1.5. EMOCA, EMOCA_v2_mp"],
@@ -54,6 +56,9 @@ def parse_args():
 
 
 face_detector, emoca_instance = None, None
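+# Set once in main() from the --flip flag; read by processing_function().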
+flip = False
+
 def setup(path_to_models, model_name, mode):
     global face_detector, emoca_instance
     with tempfile.TemporaryDirectory() as temp_dir:
@@ -70,14 +75,18 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
     """
     Reconstructs a video with the EMOCA model.
     """   
-    global face_detector, emoca_instance
+    global face_detector, emoca_instance, flip
     # Run the EMOCA model on each frame
     data = np.zeros((remaining_frames), dtype=dict)
-    for idx in tqdm(range(remaining_frames)):
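+    # leave=False removes this per-video progress bar once it completes.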
+    for idx in tqdm(range(remaining_frames), leave=False):
         # Read from Capture
         ret, frame = cap.read()
         if not ret:
             break
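+        # Mirror the frame horizontally (flipCode=1 flips around the vertical axis).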
+        if flip:
+            frame = cv2.flip(frame, 1)
 
         # Run detection
         detection_images, detection_centers, detection_sizes, \
@@ -114,11 +123,13 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
     return data
 
 def main():
+    global flip
     args = parse_args()
     video_root = Path(args.video_root_dir)
     annotations_path = Path(args.annotation_file)
     output_file = Path(args.output_file)
     checkpoint = Path(args.checkpoint) if args.checkpoint is not None else None
+    flip = args.flip
 
     annotations = json.load(annotations_path.open("r"))
 
-- 
GitLab