diff --git a/gdl_apps/EMOCA/demos/test_emoca_on_images.py b/gdl_apps/EMOCA/demos/test_emoca_on_images.py
index 8b8558f92a66d689345a4d81eaab127199d22fdf..8ea98345ae0c91decdc235406ffa78601cd2e015 100644
--- a/gdl_apps/EMOCA/demos/test_emoca_on_images.py
+++ b/gdl_apps/EMOCA/demos/test_emoca_on_images.py
@@ -34,25 +34,26 @@ from gdl_apps.EMOCA.utils.io import save_obj, save_images, save_codes, test
 def main():
     parser = argparse.ArgumentParser()
     # add the input folder arg 
-    parser.add_argument('--input_folder', type=str, default= str(Path(gdl.__file__).parents[1] / "data/EMOCA_test_example_data/images/affectnet_test_examples"))
+    parser.add_argument('--input_folder', type=str, default= str(Path(gdl.__file__).parents[1] / "assets/data/EMOCA_test_example_data/images/affectnet_test_examples"))
     parser.add_argument('--output_folder', type=str, default="image_output", help="Output folder to save the results to.")
     parser.add_argument('--model_name', type=str, default='EMOCA', help='Name of the model to use.')
     parser.add_argument('--path_to_models', type=str, default=str(Path(gdl.__file__).parents[1] / "assets/EMOCA/models"))
     parser.add_argument('--save_images', type=bool, default=True, help="If true, output images will be saved")
     parser.add_argument('--save_codes', type=bool, default=False, help="If true, output FLAME values for shape, expression, jaw pose will be saved")
     parser.add_argument('--save_mesh', type=bool, default=False, help="If true, output meshes will be saved")
+    parser.add_argument('--mode', type=str, default='detail', help="coarse or detail")
     
     args = parser.parse_args()
 
-
     # path_to_models = '/ps/scratch/rdanecek/emoca/finetune_deca'
     # path_to_models = '/is/cluster/work/rdanecek/emoca/finetune_deca'
     path_to_models = args.path_to_models
     input_folder = args.input_folder
-    output_folder = args.output_folder
     model_name = args.model_name
+    output_folder = str(Path(args.output_folder) / model_name)
 
-    mode = 'detail'
+    mode = args.mode
+    # mode = 'detail'
     # mode = 'coarse'
 
     # 1) Load the model
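
Note (not part of the patch): the --save_images, --save_codes and --save_mesh flags above keep argparse's type=bool, which turns any non-empty string into True, so passing --save_mesh False on the command line still enables mesh export. A minimal sketch of a string-to-bool converter, in the spirit of the str2bool helper already defined in test_emoca_on_video.py (its exact body is not shown in this diff, so the version below is an assumption):

    import argparse

    def str2bool(v):
        # Accept common spellings of true/false; reject anything else.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "1"):
            return True
        if v.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser()
    # Hypothetical re-registration of one flag with the converter.
    parser.add_argument('--save_mesh', type=str2bool, default=False,
                        help="If true, output meshes will be saved")

The same converter could be registered for the other two flags; this is only a sketch, not a change the patch makes.
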
diff --git a/gdl_apps/EMOCA/demos/test_emoca_on_video.py b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
index c0b2dac2cf35074531296e2a8836d1fb5d727acf..e71ed04770ef3561b4aa6e8723e94f4c365d06d2 100644
--- a/gdl_apps/EMOCA/demos/test_emoca_on_video.py
+++ b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
@@ -39,8 +39,8 @@ def str2bool(v):
 def reconstruct_video(args):
     path_to_models = args.path_to_models
     input_video = args.input_video
-    output_folder = args.output_folder
     model_name = args.model_name
+    output_folder = str(Path(args.output_folder) / model_name)
     image_type = args.image_type
     black_background = args.black_background
     include_original = args.include_original
@@ -130,7 +130,7 @@ def reconstruct_video(args):
 
 def parse_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--input_video', type=str, default=str(Path(gdl.__file__).parents[1] / "data/EMOCA_test_example_data/videos/82-25-854x480_affwild2.mp4"), 
+    parser.add_argument('--input_video', type=str, default=str(Path(gdl.__file__).parents[1] / "assets/data/EMOCA_test_example_data/videos/82-25-854x480_affwild2.mp4"), 
         help="Filename of the video for reconstruction.")
     parser.add_argument('--output_folder', type=str, default="video_output", help="Output folder to save the results to.")
     parser.add_argument('--model_name', type=str, default='EMOCA', help='Name of the model to use. Currently EMOCA or DECA are available.')
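
Note (not part of the patch): both demos now nest their results one level deeper, under <output_folder>/<model_name>. Whether the downstream save helpers (save_obj, save_images, save_codes) create that directory themselves is not verified here; if they do not, a minimal sketch for creating it up front:

    from pathlib import Path

    # Hypothetical values standing in for args.output_folder and args.model_name.
    output_folder = Path("image_output") / "EMOCA"
    output_folder.mkdir(parents=True, exist_ok=True)
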