diff --git a/Extract_HaMeR_Multi.py b/Extract_HaMeR_Multi.py
index fb2a4322181ac806467e88a45d1fb485ad38510d..c488c489be7d0faee6664a1818520771fe4443d7 100644
--- a/Extract_HaMeR_Multi.py
+++ b/Extract_HaMeR_Multi.py
@@ -219,9 +219,13 @@ def main(args, model, renderer, device):
         else:
             # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-            h, w, _ = img_cv2.shape
-            pred_bboxes = np.array([[0, 0, w, h]])
-            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            if args.custom_bbox == '':
+                h, w, _ = img_cv2.shape
+                pred_bboxes = np.array([[0, 0, w, h]])
+                print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            else:
+                pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+                print(f"Using custom bbox, {pred_bboxes[0]}")
 
             # Force confidence to be 0.99 that human is present
             pred_scores = np.array([0.99])
 
@@ -328,6 +332,7 @@ if __name__ == '__main__':
     parser.add_argument('--body_detector', type=str, default='vitdet', choices=['vitdet', 'regnety'], help='Using regnety improves runtime and reduces memory')
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--bbox', type=bool, default=True, help= 'If set, use provided bbox from ViT')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
     parser.add_argument('--MANO_Output', type=bool, default=False, help= 'If set, generate output images')
 
     args = parser.parse_args()
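The new branch parses `--custom_bbox` with `list(map(int, args.custom_bbox.split(',')))`, so a malformed value (wrong element count, floats, stray characters) surfaces as a bare `ValueError` deep inside `main`. Below is a minimal sketch of a validating parser that could back the flag instead; `parse_custom_bbox` is a hypothetical helper, not part of this patch:

```python
import argparse

import numpy as np


def parse_custom_bbox(value: str) -> np.ndarray:
    """Parse an 'x1,y1,x2,y2' string into a (1, 4) bbox array.

    Hypothetical helper; mirrors the list(map(int, ...)) call in the
    patch but reports malformed input as an argparse error.
    """
    parts = value.split(',')
    if len(parts) != 4:
        raise argparse.ArgumentTypeError(
            f"expected 4 comma-separated integers x1,y1,x2,y2, got {value!r}")
    try:
        x1, y1, x2, y2 = (int(p) for p in parts)
    except ValueError:
        raise argparse.ArgumentTypeError(
            f"bbox values must be integers: {value!r}")
    if x2 <= x1 or y2 <= y1:
        raise argparse.ArgumentTypeError(
            f"bbox needs x1 < x2 and y1 < y2: {value!r}")
    return np.array([[x1, y1, x2, y2]])
```

Wiring this in via `type=parse_custom_bbox` on the new argument would reject bad input at parse time and let the `else:` branch use the returned array directly.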
diff --git a/Extract_HaMeR_Single.py b/Extract_HaMeR_Single.py
index 9aaacf8bd4d7816b62d8ea429b6ae5fabcf31cc9..fb9e5bea54f312dc7118800fe5d3a8ad488ae23d 100644
--- a/Extract_HaMeR_Single.py
+++ b/Extract_HaMeR_Single.py
@@ -220,9 +220,13 @@ def main(args, model, renderer, device):
             return
         else:
             # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-            h, w, _ = img_cv2.shape
-            pred_bboxes = np.array([[0, 0, w, h]])
-            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            if args.custom_bbox == '':
+                h, w, _ = img_cv2.shape
+                pred_bboxes = np.array([[0, 0, w, h]])
+                print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            else:
+                pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+                print(f"Using custom bbox, {pred_bboxes[0]}")
 
             # Force confidence to be 0.99 that human is present
             pred_scores = np.array([0.99])
@@ -325,6 +329,7 @@ if __name__ == '__main__':
     parser.add_argument('--body_detector', type=str, default='vitdet', choices=['vitdet', 'regnety'], help='Using regnety improves runtime and reduces memory')
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--bbox', type=bool, default=True, help= 'If set, use provided bbox from ViT')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
    parser.add_argument('--MANO_Output', type=bool, default=False, help= 'If set, generate output images')
 
     args = parser.parse_args()
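Separately from the lines this patch adds: the surrounding context in both extractors declares `--bbox` and `--MANO_Output` with `type=bool`. argparse applies `bool()` to the raw string, and any non-empty string, including `"False"`, is truthy, so these flags cannot actually be disabled from the command line. Should a cleanup pass touch these lines, the sketch below shows the standard alternative (assuming the project runs on Python 3.9+, where `BooleanOptionalAction` is available):

```python
import argparse

# type=bool calls bool() on the raw string, so '--bbox False' still
# yields True; store_true/store_false or BooleanOptionalAction parse
# the intent correctly.
parser = argparse.ArgumentParser()
parser.add_argument('--bbox', action=argparse.BooleanOptionalAction,
                    default=True, help='If set, use provided bbox from ViT')

print(parser.parse_args([]).bbox)             # True  (default)
print(parser.parse_args(['--no-bbox']).bbox)  # False
```

`Inject_Json.py` below already uses `action='store_true'` for its `--bbox` flag, so only the two extractor scripts are affected.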
diff --git a/Inject_Json.py b/Inject_Json.py
index 6b4375ae0e303071064c31399a6b5fda4acd5811..20c68421df9056f81f6c1fccef92ac1bb4a010c5 100644
--- a/Inject_Json.py
+++ b/Inject_Json.py
@@ -104,6 +104,11 @@ def main(args, model, device):
     image_folder = Path(args.img_folder)
 
     # Setup Image Paths and Json Paths Ranges
+    if args.json_folder.endswith('.tar.xz'):
+        cmd = f"tar -xf {args.json_folder} -C {temp_dir.name}"
+        os.system(cmd)
+        args.json_folder = os.path.join(temp_dir.name, os.path.basename(args.json_folder).removesuffix('.tar.xz'))
+
     img_paths = [img for end in args.file_type for img in image_folder.glob(end)]
     img_paths = sorted(img_paths,key = lambda x: int(os.path.basename(x).removesuffix('.png').removeprefix('Frame')))
     json_list = [os.path.join(args.json_folder, f) for f in os.listdir(args.json_folder) if f.endswith('.json')]
@@ -138,9 +143,13 @@ def main(args, model, device):
             return
         else:
             # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-            h, w, _ = img_cv2.shape
-            pred_bboxes = np.array([[0, 0, w, h]])
-            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            if args.custom_bbox == '':
+                h, w, _ = img_cv2.shape
+                pred_bboxes = np.array([[0, 0, w, h]])
+                print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+            else:
+                pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+                print(f"Using custom bbox, {pred_bboxes[0]}")
 
             # Force confidence to be 0.99 that human is present
             pred_scores = np.array([0.99])
@@ -300,12 +309,13 @@ if __name__ == '__main__':
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--injected_hand', type=int, default=2, help='Number of hands in the Video')
     parser.add_argument('--bbox', dest='bbox', action='store_true', default=True, help='If set, use detected bbox')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
     parser.add_argument('--json_folder', type=str, default='', help='Json file for input')
     parser.add_argument('--frame_range', type=str, default='0,-1', help='Frame range for input')
 
     args = parser.parse_args()
     args.vid = '/vol/vssp/datasets/mixedmode/mein-dgs-korpus/CROPPED_VIDEOS/1176549/1176549_1a1.cropped.mp4'
-    args.json_folder = '/vol/research/signVision/Projects/BSLboundaries/MeinDGS_Feature/1176549/1176549_1a1.cropped'
+    args.json_folder = '/vol/research/signVision/Projects/BSLboundaries/MeinDGS_HaMeR_Feature/1176549/1176549_1a1.cropped.tar.xz'
     args.out_folder = '/vol/research/signVision/Projects/BSLboundaries/Test'
     args.frame_range = '0,150'
     args.bbox = False
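The new extraction step in `Inject_Json.py` shells out with `os.system`, which interpolates the archive path unquoted and silently ignores a non-zero exit status from `tar`. A minimal sketch of the same step using the standard-library `tarfile` module follows; it assumes, as `temp_dir.name` in the patch suggests, that `temp_dir` is a `tempfile.TemporaryDirectory` created earlier in the script:

```python
import os
import tarfile
import tempfile

temp_dir = tempfile.TemporaryDirectory()  # stands in for the script's temp_dir


def resolve_json_folder(json_folder: str) -> str:
    """Extract a .tar.xz feature archive and return the unpacked folder.

    Sketch of the patch's extraction step via tarfile: open() raises on
    a missing or corrupt archive instead of failing silently.
    """
    if json_folder.endswith('.tar.xz'):
        with tarfile.open(json_folder, mode='r:xz') as tar:
            tar.extractall(temp_dir.name)  # Python 3.12+: pass filter='data'
        json_folder = os.path.join(
            temp_dir.name,
            os.path.basename(json_folder).removesuffix('.tar.xz'))
    return json_folder
```

Matching the patch's behaviour, the returned path is the archive name with `.tar.xz` stripped; this only resolves correctly if the tarball's top-level directory carries that same name.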