From 45d9dd738361f1c3fbab99d39360cc7f440b4787 Mon Sep 17 00:00:00 2001
From: JianHe0628 <jl02958@surrey.ac.uk>
Date: Wed, 20 Nov 2024 17:51:07 +0000
Subject: [PATCH] Add custom bbox option

---
 Extract_HaMeR_Multi.py  | 11 ++++++++---
 Extract_HaMeR_Single.py | 11 ++++++++---
 Inject_Json.py          | 18 ++++++++++++++----
 3 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/Extract_HaMeR_Multi.py b/Extract_HaMeR_Multi.py
index fb2a432..c488c48 100644
--- a/Extract_HaMeR_Multi.py
+++ b/Extract_HaMeR_Multi.py
@@ -219,9 +219,13 @@ def main(args, model, renderer, device):
     
     else:
         # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-        h, w, _ = img_cv2.shape
-        pred_bboxes = np.array([[0, 0, w, h]])
-        print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        if args.custom_bbox == '':
+            h, w, _ = img_cv2.shape
+            pred_bboxes = np.array([[0, 0, w, h]])
+            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        else:
+            pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+            print(f"Using custom bbox, {pred_bboxes[0]}")
         # Force confidence to be 0.99 that human is present
         pred_scores = np.array([0.99])
     
@@ -328,6 +332,7 @@ if __name__ == '__main__':
     parser.add_argument('--body_detector', type=str, default='vitdet', choices=['vitdet', 'regnety'], help='Using regnety improves runtime and reduces memory')
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--bbox', type=bool, default=True, help= 'If set, use provided bbox from ViT')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
     parser.add_argument('--MANO_Output', type=bool, default=False, help= 'If set, generate output images')
 
     args = parser.parse_args()
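
The hunk above falls back to the full-image bbox when --custom_bbox is left at its empty-string
default, and otherwise splits the comma-separated string into integers. For reference, a minimal
standalone sketch of that same logic; the helper name parse_custom_bbox and the length check are
illustrative additions, not part of the patch:

    import numpy as np

    def parse_custom_bbox(custom_bbox, img_shape):
        """Parse an 'x1,y1,x2,y2' string into a (1, 4) bbox array.

        An empty string falls back to a full-image bbox, mirroring the
        behaviour the patch adds to the extraction scripts.
        """
        if custom_bbox == '':
            h, w = img_shape[:2]
            return np.array([[0, 0, w, h]])
        coords = [int(v) for v in custom_bbox.split(',')]
        if len(coords) != 4:
            raise ValueError('custom_bbox must be four integers: x1,y1,x2,y2')
        return np.array([coords])

    # e.g. a crop from (10, 20) to (310, 220) in a 1280x720 image
    print(parse_custom_bbox('10,20,310,220', (720, 1280, 3)))  # [[ 10  20 310 220]]
    print(parse_custom_bbox('', (720, 1280, 3)))               # [[   0    0 1280  720]]
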
diff --git a/Extract_HaMeR_Single.py b/Extract_HaMeR_Single.py
index 9aaacf8..fb9e5be 100644
--- a/Extract_HaMeR_Single.py
+++ b/Extract_HaMeR_Single.py
@@ -220,9 +220,13 @@ def main(args, model, renderer, device):
             return
     else:
         # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-        h, w, _ = img_cv2.shape
-        pred_bboxes = np.array([[0, 0, w, h]])
-        print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        if args.custom_bbox == '':
+            h, w, _ = img_cv2.shape
+            pred_bboxes = np.array([[0, 0, w, h]])
+            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        else:
+            pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+            print(f"Using custom bbox, {pred_bboxes[0]}")
         # Force confidence to be 0.99 that human is present
         pred_scores = np.array([0.99])
     
@@ -325,6 +329,7 @@ if __name__ == '__main__':
     parser.add_argument('--body_detector', type=str, default='vitdet', choices=['vitdet', 'regnety'], help='Using regnety improves runtime and reduces memory')
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--bbox', type=bool, default=True, help= 'If set, use provided bbox from ViT')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
     parser.add_argument('--MANO_Output', type=bool, default=False, help= 'If set, generate output images')
 
     args = parser.parse_args()
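
Extract_HaMeR_Single.py receives the same --custom_bbox flag with an empty-string default, so the
hardcoded full-image bbox remains the behaviour when the flag is omitted. A small argparse
round-trip illustrating that default handling (illustrative only, not taken from the scripts):

    import argparse

    # Minimal reproduction of the flag added in both extraction scripts.
    parser = argparse.ArgumentParser()
    parser.add_argument('--custom_bbox', type=str, default='',
                        help='Custom bbox in the format x1,y1,x2,y2')

    # No flag given -> empty string -> scripts fall back to the full-image bbox.
    print(repr(parser.parse_args([]).custom_bbox))                            # ''
    # Flag given -> comma-separated string the scripts split into integers.
    print(parser.parse_args(['--custom_bbox', '10,20,310,220']).custom_bbox)  # 10,20,310,220
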
diff --git a/Inject_Json.py b/Inject_Json.py
index 6b4375a..20c6842 100644
--- a/Inject_Json.py
+++ b/Inject_Json.py
@@ -104,6 +104,11 @@ def main(args, model, device):
         image_folder = Path(args.img_folder)
     
     # Setup Image Paths and Json Paths Ranges
+    if args.json_folder.endswith('.tar.xz'):
+        cmd = f"tar -xf {args.json_folder} -C {temp_dir.name}"
+        os.system(cmd)
+        args.json_folder = os.path.join(temp_dir.name, os.path.basename(args.json_folder).removesuffix('.tar.xz'))
+
     img_paths = [img for end in args.file_type for img in image_folder.glob(end)]
     img_paths = sorted(img_paths,key = lambda x: int(os.path.basename(x).removesuffix('.png').removeprefix('Frame')))
     json_list = [os.path.join(args.json_folder, f) for f in os.listdir(args.json_folder) if f.endswith('.json')]
@@ -138,9 +143,13 @@ def main(args, model, device):
             return
     else:
         # Hardcoded bbox. This assumes person is in the center and that there is always one person in the image
-        h, w, _ = img_cv2.shape
-        pred_bboxes = np.array([[0, 0, w, h]])
-        print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        if args.custom_bbox == '':
+            h, w, _ = img_cv2.shape
+            pred_bboxes = np.array([[0, 0, w, h]])
+            print(f"Using hardcoded bbox, {pred_bboxes[0]}")
+        else:
+            pred_bboxes = np.array([list(map(int, args.custom_bbox.split(',')))])
+            print(f"Using custom bbox, {pred_bboxes[0]}")
         # Force confidence to be 0.99 that human is present
         pred_scores = np.array([0.99])
     
@@ -300,12 +309,13 @@ if __name__ == '__main__':
     parser.add_argument('--file_type', nargs='+', default=['*.jpg', '*.png'], help='List of file extensions to consider')
     parser.add_argument('--injected_hand', type=int, default=2, help='Number of hands in the Video')
     parser.add_argument('--bbox', dest='bbox', action='store_true', default=True, help='If set, use detected bbox')
+    parser.add_argument('--custom_bbox', type=str, default='', help='Custom bbox in the format x1,y1,x2,y2')
     parser.add_argument('--json_folder', type=str, default='', help='Json file for input')
     parser.add_argument('--frame_range', type=str, default='0,-1', help='Frame range for input')
     args = parser.parse_args()
     
     args.vid = '/vol/vssp/datasets/mixedmode/mein-dgs-korpus/CROPPED_VIDEOS/1176549/1176549_1a1.cropped.mp4'
-    args.json_folder = '/vol/research/signVision/Projects/BSLboundaries/MeinDGS_Feature/1176549/1176549_1a1.cropped'
+    args.json_folder = '/vol/research/signVision/Projects/BSLboundaries/MeinDGS_HaMeR_Feature/1176549/1176549_1a1.cropped.tar.xz'
     args.out_folder = '/vol/research/signVision/Projects/BSLboundaries/Test'
     args.frame_range = '0,150'
     args.bbox = False
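
Inject_Json.py now also accepts a .tar.xz archive of JSON files, which the patch unpacks by
shelling out to tar into a temp_dir assumed to be created elsewhere in the script. A hedged,
self-contained sketch of that step using only the standard library (tarfile/tempfile); the helper
name extract_json_archive is illustrative and not part of the patch:

    import os
    import tarfile
    import tempfile

    def extract_json_archive(json_folder):
        """Return a directory of JSON files, extracting json_folder first
        if it points at a .tar.xz archive; otherwise return it unchanged.
        Sketch only -- the patch itself shells out to `tar -xf`.
        """
        if not json_folder.endswith('.tar.xz'):
            return json_folder
        temp_dir = tempfile.mkdtemp()
        with tarfile.open(json_folder, mode='r:xz') as tar:
            tar.extractall(path=temp_dir)
        # The archive is assumed to contain a folder named after the archive
        # (minus the .tar.xz suffix), matching the patch's path handling.
        return os.path.join(temp_dir,
                            os.path.basename(json_folder).removesuffix('.tar.xz'))
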
-- 
GitLab