diff --git a/gdl_apps/EMOCA/runners/smile_runner.py b/gdl_apps/EMOCA/runners/smile_runner.py
index cfb04304a048eeaa84d5f7a64c16a2f7defa9685..a9c70752295939bc128236ff975de97c9a431a40 100644
--- a/gdl_apps/EMOCA/runners/smile_runner.py
+++ b/gdl_apps/EMOCA/runners/smile_runner.py
@@ -8,6 +8,7 @@ from tqdm import tqdm
 import cv2
 import torch
 import numpy as np
+import json
 
 from runnertools import run_on_all_clips
 
@@ -37,7 +38,8 @@ def parse_args():
                         choices=["EMOCA_v2_lr_mse_20", "EMOCA_v2_lr_cos_1.5. EMOCA, EMOCA_v2_mp"],
                         help='Name of the model to use; see choices for the available EMOCA variants.')
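+    # The default model directory is now a relative path, resolved against the working directory.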
     parser.add_argument('--path_to_models', '-pm', type=str, 
-                        default="/scratch/assets/EMOCA/models")
+                        default="assets/EMOCA/models")
     parser.add_argument('--mode', '-M', type=str, 
                         default="detail", 
                         choices=["detail", "coarse"], 
@@ -61,9 +62,10 @@ def setup(path_to_models, model_name, mode):
         processed_subfolder=temp_dir, 
         batch_size=1, num_workers=1, face_detector="fan")
 
-    emoca, conf = load_model(path_to_models, model_name, mode)
-    emoca.cuda()
-    emoca.eval()
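+    # Load the requested EMOCA model, move it to the GPU, and switch it to eval mode.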
+    emoca_instance, conf = load_model(path_to_models, model_name, mode)
+    emoca_instance.cuda()
+    emoca_instance.eval()
 
 def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
     """
@@ -83,7 +84,6 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
         bbox_type, detection_landmarks, original_landmarks \
             = face_detector._detect_faces_in_image(frame)
 
-
         if len(detection_images) == 0:
             continue
             
@@ -100,17 +100,31 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
         vals["landmarks"] = original_landmarks
         vals["detection_landmarks"] = detection_landmarks
 
-        data[idx] = vals
-        import pdb; pdb.set_trace()
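+        # Keep only the regressed codes (shape/tex/exp/pose/cam) and landmarks for this frame.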
+        output = {
+            "shapecode": vals["shapecode"],
+            "texcode": vals["texcode"],
+            "expcode": vals["expcode"],
+            "posecode": vals["posecode"],
+            "cam": vals["cam"],
+            "landmarks": original_landmarks,
+            "detection_landmarks": detection_landmarks
+        }
+
+        data[idx] = output
     return data
 
 def main():
     args = parse_args()
     video_root = Path(args.video_root_dir)
-    annotations = Path(args.annotation_file)
+    annotations_path = Path(args.annotation_file)
     output_file = Path(args.output_file)
     checkpoint = Path(args.checkpoint) if args.checkpoint is not None else None
 
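+    # Parse the annotation JSON up front so run_on_all_clips receives the data rather than a path.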
+    with annotations_path.open("r") as f:
+        annotations = json.load(f)
+
     setup(args.path_to_models, args.model_name, args.mode)
     run_on_all_clips(processing_function, annotations, video_root, output_file, checkpoint=checkpoint)