diff --git a/README.md b/README.md
index 19a275de23b891ddf61bb4e46634c85bdf533268..4fc9a247df3d79f1677af598ac18224d370963ed 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,12 @@ This repository is the official implementation of the [CVPR 2022](https://cvpr20
     <a href="https://pytorchlightning.ai/"><img alt="Lightning" src="https://img.shields.io/badge/-Lightning-792ee5?logo=pytorchlightning&logoColor=white"></a>
     <a href='https://emoca.is.tue.mpg.de/' style='padding-left: 0.5rem;'>
       <img src='https://img.shields.io/badge/Project-Page-blue?style=flat&logo=Google%20chrome&logoColor=blue' alt='Project Page'></a>
+    <a href='https://youtu.be/zjMLB2-dVGw' style='padding-left: 0.5rem;'>
+      <img src='https://img.shields.io/badge/Youtube-Video-red?style=flat&logo=youtube&logoColor=red' alt='YouTube Video'>
+    </a>
+    <a href='https://ps.is.mpg.de/uploads_file/attachment/attachment/686/EMOCA__CVPR22.pdf'>
+      <img src='https://img.shields.io/badge/Paper-PDF-green?style=flat&logo=arXiv&logoColor=green' alt='Paper PDF'>
+    </a>
 </p>
 
 EMOCA takes a single in-the-wild image as input and reconstructs a 3D face with sufficient facial expression detail to convey the emotional state of the input image. EMOCA advances the state-of-the-art monocular face reconstruction in-the-wild, putting emphasis on accurate capture of emotional content. The official project page is [here](https://emoca.is.tue.mpg.de/index.html).
diff --git a/gdl_apps/EMOCA/demos/test_emoca_on_video.py b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
index b49d47379ffdc2ee30bcd790b62792684788d3af..420c3579e7602dcad68e3b26811a6a324ea37cd5 100644
--- a/gdl_apps/EMOCA/demos/test_emoca_on_video.py
+++ b/gdl_apps/EMOCA/demos/test_emoca_on_video.py
@@ -31,7 +31,7 @@ def main():
     parser.add_argument('--input_video', type=str, default=str(Path(gdl.__file__).parents[1] / "data/EMOCA_test_example_data/videos/82-25-854x480_affwild2.mp4"), 
         help="Filename of the video for reconstruction.")
     parser.add_argument('--output_folder', type=str, default="video_output", help="Output folder to save the results to.")
-    parser.add_argument('--model_name', type=str, default='EMOCA', help='Name of the model to use.')
+    parser.add_argument('--model_name', type=str, default='EMOCA', help='Name of the model to use. Currently, EMOCA and DECA are available.')
     parser.add_argument('--path_to_models', type=str, default=str(Path(gdl.__file__).parents[1] / "assets/EMOCA/models"))
     parser.add_argument('--save_images', type=bool, default=True, help="If true, output images will be saved")
     parser.add_argument('--save_codes', type=bool, default=False, help="If true, output FLAME values for shape, expression, jaw pose will be saved")
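
For context, a minimal usage sketch of the demo touched by this diff, assuming the repository's environment is installed, the pretrained models have been downloaded into assets/EMOCA/models, and the script is run from the repository root; the model names mirror the updated --model_name help text and the output folder names are illustrative:

    # Minimal sketch: run the video demo once per model name accepted by --model_name.
    # Assumes the project environment is active and pretrained models are already
    # placed under assets/EMOCA/models, as described in the repository's setup notes.
    import subprocess

    for model in ("EMOCA", "DECA"):  # the two options named in the updated help text
        subprocess.run(
            [
                "python", "gdl_apps/EMOCA/demos/test_emoca_on_video.py",
                "--model_name", model,
                "--output_folder", f"video_output_{model}",  # illustrative folder name
            ],
            check=True,  # raise if the demo exits with a non-zero status
        )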