Commit 1fee717c authored by George Alcolado Nuthall

feat: bash and py to run transformed and reformatted colmap poses

parent 2af77ef9
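
create_nerfstudio_train.py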
import argparse
import os
import json

import numpy as np
def qvec2rotmat(qvec):
    # Convert a COLMAP quaternion (qw, qx, qy, qz) to a 3x3 rotation matrix.
    return np.array([
        [
            1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
            2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
            2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]
        ], [
            2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
            1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
            2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]
        ], [
            2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
            2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
            1 - 2 * qvec[1]**2 - 2 * qvec[2]**2
        ]
    ])
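
# Sanity check, should you want one: COLMAP stores quaternions scalar-first
# (qw, qx, qy, qz), while scipy expects scalar-last, so the matrix above
# should agree with
#   from scipy.spatial.transform import Rotation
#   Rotation.from_quat([qx, qy, qz, qw]).as_matrix()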
def rotmat(a, b):
    # Rotation matrix that rotates unit vector a onto unit vector b
    # (Rodrigues' rotation formula).
    a, b = a / np.linalg.norm(a), b / np.linalg.norm(b)
    v = np.cross(a, b)
    c = np.dot(a, b)
    # handle exception for the opposite direction input
    if c < -1 + 1e-10:
        return rotmat(a + np.random.uniform(-1e-2, 1e-2, 3), b)
    s = np.linalg.norm(v)
    kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    return np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2 + 1e-10))
# Returns the point closest to both rays of the form o + t*d, and a weight
# factor that goes to 0 if the lines are parallel.
def closest_point_2_lines(oa, da, ob, db):
    da = da / np.linalg.norm(da)
    db = db / np.linalg.norm(db)
    c = np.cross(da, db)
    denom = np.linalg.norm(c)**2
    t = ob - oa
    ta = np.linalg.det([t, db, c]) / (denom + 1e-10)
    tb = np.linalg.det([t, da, c]) / (denom + 1e-10)
    if ta > 0:
        ta = 0
    if tb > 0:
        tb = 0
    return (oa + ta * da + ob + tb * db) * 0.5, denom
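
# Note: rotmat and closest_point_2_lines are not called anywhere in this
# script. They look like the scene-recentering helpers from instant-ngp's
# colmap2nerf.py, presumably kept for optional reorientation of the
# reconstruction.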
def manage_cameras(args):
    # nerfstudio expects intrinsics/poses in the conventions described at
    # https://docs.nerf.studio/en/latest/quickstart/data_conventions.html
    cameras = []
    with open(os.path.join(args.colmap_dir, "cameras.txt"), "r") as f:
        for line in f:
            # Example cameras.txt lines (CAMERA_ID MODEL WIDTH HEIGHT PARAMS...):
            # 1 SIMPLE_RADIAL 2048 1536 1580.46 1024 768 0.0045691
            # 1 OPENCV 3840 2160 3178.27 3182.09 1920 1080 0.159668 -0.231286 -0.00123982 0.00272224
            # 1 RADIAL 1920 1080 1665.1 960 540 0.0672856 -0.0761443
            line = line.strip()
            if not line or line[0] == "#":
                continue
            els = line.split(" ")
            w = float(els[2])
            h = float(els[3])
            fl_x = float(els[4])
            fl_y = float(els[4])
            k1, k2, k3, k4 = 0, 0, 0, 0
            p1, p2 = 0, 0
            cx = w / 2
            cy = h / 2
            model = els[1]
            if model == "SIMPLE_PINHOLE":
                cx = float(els[5])
                cy = float(els[6])
            elif model == "PINHOLE":
                fl_y = float(els[5])
                cx = float(els[6])
                cy = float(els[7])
            elif model in ("SIMPLE_RADIAL", "RADIAL"):
                cx = float(els[5])
                cy = float(els[6])
                k1 = float(els[7])
                if model == "RADIAL":
                    k2 = float(els[8])
            elif model == "OPENCV":
                fl_y = float(els[5])
                cx = float(els[6])
                cy = float(els[7])
                k1 = float(els[8])
                k2 = float(els[9])
                p1 = float(els[10])
                p2 = float(els[11])
            else:
                print("Unknown camera model ", els[1])
            cameras.append({
                "id": els[0],
                "fl_x": fl_x,
                "fl_y": fl_y,
                "cx": cx,
                "cy": cy,
                "k1": k1,
                "k2": k2,
                "k3": k3,
                "k4": k4,
                "p1": p1,
                "p2": p2,
            })
            print(
                f"camera:\n\tres={w,h}\n\tcenter={cx,cy}\n\tfocal={fl_x,fl_y}\n")
    # model, w and h of the last camera fill the top-level JSON fields.
    return cameras, model, w, h
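
# For illustration, the OPENCV example line above parses to (values taken
# straight from that line):
#   {"id": "1", "fl_x": 3178.27, "fl_y": 3182.09, "cx": 1920.0, "cy": 1080.0,
#    "k1": 0.159668, "k2": -0.231286, "k3": 0, "k4": 0,
#    "p1": -0.00123982, "p2": 0.00272224}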
def handle_images(args, cameras):
    # COLMAP/OpenCV cameras look down +z with +y down; negating the y and z
    # camera axes converts to the OpenGL-style convention NeRF pipelines expect.
    flip_mat = np.array([
        [1, 0, 0, 0],
        [0, -1, 0, 0],
        [0, 0, -1, 0],
        [0, 0, 0, 1]
    ])
    frames = []
    bottom = np.array([0.0, 0.0, 0.0, 1.0]).reshape([1, 4])
    with open(os.path.join(args.colmap_dir, "images.txt"), "r") as f:
        i = 0
        for line in f:
            line = line.strip()
            if not line or line[0] == "#":
                continue
            i = i + 1
            # images.txt alternates a pose line with a 2D-points line;
            # only the odd lines carry poses.
            if i % 2 == 1:
                # 1-4 is quat, 5-7 is trans, 8 is camera id, 9 is filename
                elems = line.split(" ")
                cam_data = next((item for item in cameras if item['id'] == elems[8]), None)
                if cam_data is None:
                    print("No camera with id ", elems[8], " - skipping frame")
                    continue
                qvec = np.array(tuple(map(float, elems[1:5])))  # qw, qx, qy, qz
                tvec = np.array(tuple(map(float, elems[5:8])))  # x, y, z
                R = qvec2rotmat(-qvec)  # q and -q encode the same rotation
                t = tvec.reshape([3, 1])
                # world-to-camera -> camera-to-world
                m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
                c2w = np.linalg.inv(m)
                if args.train_nerf == 'True':
                    transform_matrix = np.matmul(c2w, flip_mat).tolist()
                else:
                    transform_matrix = c2w.tolist()
                frame = {
                    # join in case the filename itself contains spaces
                    'file_path': os.path.abspath(os.path.join(args.imagepath, " ".join(elems[9:]))),
                    #'mask_path': os.path.abspath(os.path.join(args.maskpath, elems[9:][0].split('/')[-1])),
                    'transform_matrix': transform_matrix,
                    'fl_x': cam_data['fl_x'],
                    'fl_y': cam_data['fl_y'],
                    'cx': cam_data['cx'],
                    'cy': cam_data['cy'],
                    'k1': cam_data['k1'],
                    'k2': cam_data['k2'],
                    'k3': cam_data['k3'],
                    'k4': cam_data['k4'],
                    'p1': cam_data['p1'],
                    'p2': cam_data['p2']
                }
                frames.append(frame)
    return frames
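
# Each frame entry follows nerfstudio's transforms.json schema with per-frame
# intrinsics, e.g.:
#   {"file_path": "/abs/path/images/frame_0001.png",
#    "transform_matrix": [[...], [...], [...], [0.0, 0.0, 0.0, 1.0]],
#    "fl_x": ..., "fl_y": ..., "cx": ..., "cy": ...,
#    "k1": ..., "k2": ..., "k3": ..., "k4": ..., "p1": ..., "p2": ...}
# (frame_0001.png is an illustrative name, not one from this dataset.)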
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="convert a text colmap export to nerf format transforms.json")
    parser.add_argument("--colmap_dir", default="./txt")
    parser.add_argument("--outpath", default="./train.json")
    parser.add_argument("--imagepath", default="./images")
    parser.add_argument("--maskpath", default="./masks")
    # compared as a string ('True'/'False') in handle_images, so the default
    # must be a string too
    parser.add_argument('--train_nerf', default='True', choices=['True', 'False'])
    args = parser.parse_args()
    cameras, model, w, h = manage_cameras(args)
    output = {
        "camera_model": model,
        'w': w,
        'h': h,
        "frames": []
    }
    frames = handle_images(args, cameras)
    output["frames"] = frames
    with open(args.outpath, "w") as outfile:
        json.dump(output, outfile, indent=2)
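
# Example invocation (matching the defaults and the driver script below):
#   python3 create_nerfstudio_train.py --colmap_dir ./txt --imagepath ./images \
#       --train_nerf True --outpath ./train.json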
#!/bin/bash
# CORE DIR - needs an images sub-directory (everything used for colmap) and a
# model-txt sub-directory holding the text files exported by colmap's model
# converter command. OUTPUT_DIR can optionally point at a separate
# sub-directory for the nerfstudio transforms.
core_dir=/vol/research/K9/people-centred-sim/13_sept_run
COLMAP_DIR="${core_dir}/model-txt"
IMAGE_DIR="${core_dir}/images"
OUTPUT_DIR="${core_dir}" # could add sub-directory
CODE_PATH="/vol/research/K9/people-centred-sim/pipeline" # change to wherever you pull the code to
EXP_NAME="EXP_X" # makes it easier to find the checkpoint - otherwise it goes under a timestamp
MODEL="nerfacto"

echo 'Generate Transforms'
python3 "$CODE_PATH/create_nerfstudio_train.py" --colmap_dir "${COLMAP_DIR}" --imagepath "${IMAGE_DIR}" --train_nerf True --outpath "${OUTPUT_DIR}/train.json"

echo 'Train Model'
# --experiment-name keeps the output path predictable (outputs land in
# out/<experiment>/<method>/<timestamp>/), so ns-eval can find the config below.
ns-train nerfacto --data "${OUTPUT_DIR}/train.json" --experiment-name "${EXP_NAME}" --timestamp "${EXP_NAME}" --output-dir "${OUTPUT_DIR}/out" --viewer.quit-on-train-completion True

echo 'Model Evaluation'
ns-eval --load-config "${OUTPUT_DIR}/out/${EXP_NAME}/${MODEL}/${EXP_NAME}/config.yml" --output-path "${OUTPUT_DIR}/out/output.json"
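
# Optional: inspect the trained model interactively with the nerfstudio viewer.
# ns-viewer --load-config "${OUTPUT_DIR}/out/${EXP_NAME}/${MODEL}/${EXP_NAME}/config.yml"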