Skip to content
Snippets Groups Projects

Fixed errors so that the SMILE runner works

parent 4ddcd84f
No related branches found
No related tags found
No related merge requests found
...@@ -8,6 +8,7 @@ from tqdm import tqdm ...@@ -8,6 +8,7 @@ from tqdm import tqdm
import cv2 import cv2
import torch import torch
import numpy as np import numpy as np
import json
from runnertools import run_on_all_clips from runnertools import run_on_all_clips
...@@ -37,7 +38,7 @@ def parse_args(): ...@@ -37,7 +38,7 @@ def parse_args():
choices=["EMOCA_v2_lr_mse_20", "EMOCA_v2_lr_cos_1.5. EMOCA, EMOCA_v2_mp"], choices=["EMOCA_v2_lr_mse_20", "EMOCA_v2_lr_cos_1.5. EMOCA, EMOCA_v2_mp"],
help='Name of the model to use. Currently EMOCA or DECA are available.') help='Name of the model to use. Currently EMOCA or DECA are available.')
parser.add_argument('--path_to_models', '-pm', type=str, parser.add_argument('--path_to_models', '-pm', type=str,
default="/scratch/assets/EMOCA/models") default="assets/EMOCA/models")
parser.add_argument('--mode', '-M', type=str, parser.add_argument('--mode', '-M', type=str,
default="detail", default="detail",
choices=["detail", "coarse"], choices=["detail", "coarse"],
...@@ -61,9 +62,9 @@ def setup(path_to_models, model_name, mode): ...@@ -61,9 +62,9 @@ def setup(path_to_models, model_name, mode):
processed_subfolder=temp_dir, processed_subfolder=temp_dir,
batch_size=1, num_workers=1, face_detector="fan") batch_size=1, num_workers=1, face_detector="fan")
emoca, conf = load_model(path_to_models, model_name, mode) emoca_instance, conf = load_model(path_to_models, model_name, mode)
emoca.cuda() emoca_instance.cuda()
emoca.eval() emoca_instance.eval()
def processing_function(cap: cv2.VideoCapture, remaining_frames: int): def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
""" """
...@@ -83,7 +84,6 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int): ...@@ -83,7 +84,6 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
bbox_type, detection_landmarks, original_landmarks \ bbox_type, detection_landmarks, original_landmarks \
= face_detector._detect_faces_in_image(frame) = face_detector._detect_faces_in_image(frame)
if len(detection_images) == 0: if len(detection_images) == 0:
continue continue
...@@ -100,17 +100,28 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int): ...@@ -100,17 +100,28 @@ def processing_function(cap: cv2.VideoCapture, remaining_frames: int):
vals["landmarks"] = original_landmarks vals["landmarks"] = original_landmarks
vals["detection_landmarks"] = detection_landmarks vals["detection_landmarks"] = detection_landmarks
data[idx] = vals output = {
import pdb; pdb.set_trace() "shapecode": vals["shapecode"],
"texcode": vals["texcode"],
"expcode": vals["expcode"],
"posecode": vals["posecode"],
"cam": vals["cam"],
"landmarks": original_landmarks,
"detection_landmarks": detection_landmarks
}
data[idx] = output
return data return data
def main():
    """Entry point: parse CLI args, load the annotation JSON, set up the
    EMOCA model, and run the processing function over all clips."""
    args = parse_args()
    video_root = Path(args.video_root_dir)
    annotations_path = Path(args.annotation_file)
    output_file = Path(args.output_file)
    # --checkpoint is optional; None means "no checkpoint to resume from".
    checkpoint = Path(args.checkpoint) if args.checkpoint is not None else None
    # Use a context manager so the file handle is closed deterministically;
    # the original `json.load(annotations_path.open("r"))` leaked the handle.
    with annotations_path.open("r") as f:
        annotations = json.load(f)
    setup(args.path_to_models, args.model_name, args.mode)
    run_on_all_clips(processing_function, annotations, video_root, output_file, checkpoint=checkpoint)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment