diff --git a/gdl/datasets/FaceVideoDataModule.py b/gdl/datasets/FaceVideoDataModule.py
index f039e5ef864b426b73d2ffba0cc65f224f01e849..1009415de2038c6932a84f1dd45fd00ee9d1a9ed 100644
--- a/gdl/datasets/FaceVideoDataModule.py
+++ b/gdl/datasets/FaceVideoDataModule.py
@@ -874,9 +874,13 @@ class FaceVideoDataModule(FaceDataModuleBase):
         self.video_metas = []
         for vi, vid_file in enumerate(tqdm(self.video_list)):
             vid = ffmpeg.probe(str( Path(self.root_dir) / vid_file))
-            codec_idx = [idx for idx in range(len(vid)) if vid['streams'][idx]['codec_type'] == 'video']
-            if len(codec_idx) > 1:
-                raise RuntimeError("Video file has two video streams! '%s'" % str(vid_file))
+            # Indices of all video streams in the file (audio/other streams are ignored).
+            codec_idx = [idx for idx in range(len(vid['streams'])) if vid['streams'][idx]['codec_type'] == 'video']
+            if len(codec_idx) == 0:
+                raise RuntimeError("Video file has no video streams! '%s'" % str(vid_file))
+            # Multiple video streams used to raise a RuntimeError; now only the first one is processed.
+            if len(codec_idx) > 1:
+                print("[WARNING] Video file has %d video streams. Only the first one will be processed." % len(codec_idx))
             codec_idx = codec_idx[0]
             vid_info = vid['streams'][codec_idx]
             vid_meta = {}
diff --git a/gdl_apps/EMOCA/demos/test_emoca_on_video.py b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
index 203fd9482cbceeabf8a44f0f7e908891adba39d3..b49d47379ffdc2ee30bcd790b62792684788d3af 100644
--- a/gdl_apps/EMOCA/demos/test_emoca_on_video.py
+++ b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
@@ -43,15 +43,21 @@ def main():
     parser.add_argument('--processed_subfolder', type=str, default=None, 
         help="If you want to resume previously interrupted computation over a video, make sure you specify" \
             "the subfolder where the got unpacked. It will be in format 'processed_%Y_%b_%d_%H-%M-%S'")
+    parser.add_argument('--cat_dim', type=int, default=0, 
+        help="Concatenate the reconstruction video vertically if 0 and horizontally if 1")
+    parser.add_argument('--include_transparent', action="store_true", default=False, 
+        help="If set, a video with a transparent mesh overlay is rendered in addition to the reconstruction video")
     args = parser.parse_args()
-
+    print("Path to models " + args.path_to_models)
     path_to_models = args.path_to_models
     input_video = args.input_video
     output_folder = args.output_folder
     model_name = args.model_name
     image_type = args.image_type
-    # processed_subfolder = args.processed_subfolder
-    processed_subfolder = "processed_2022_Jan_15_15-03-37"
+    cat_dim = args.cat_dim
+    include_transparent = args.include_transparent
+    print("Include transparent:", include_transparent)
+    processed_subfolder = args.processed_subfolder
 
     mode = 'detail'
     # mode = 'coarse'
@@ -96,7 +102,8 @@ def main():
                 save_codes(Path(outfolder), name, vals, i)
 
     ## 5) Create the reconstruction video (reconstructions overlayed on the original video)
-    dm.create_reconstruction_video(0,  rec_method=model_name, image_type=image_type, overwrite=True)
+    dm.create_reconstruction_video(0,  rec_method=model_name, image_type=image_type, overwrite=True, 
+            cat_dim=cat_dim, include_transparent=include_transparent)
     print("Done")