COLMAP¶
Visualize COLMAP sparse reconstruction outputs. To get demo data, see ../assets/download_assets.sh.
Features:
COLMAP sparse reconstruction file parsing
Camera frustum visualization with viser.SceneApi.add_camera_frustum()
3D point cloud display from structure-from-motion
Interactive camera and point visibility controls
Note
This example requires external assets. To download them, run:
git clone -b v1.0.26 https://github.com/nerfstudio-project/viser.git
cd viser/examples
./assets/download_assets.sh
python 04_demos/01_colmap_visualizer.py # With viser installed.
Source: examples/04_demos/01_colmap_visualizer.py
Code¶
1import random
2import time
3from pathlib import Path
4from typing import List
5
6import imageio.v3 as iio
7import numpy as np
8import tyro
9from tqdm.auto import tqdm
10
11import viser
12import viser.transforms as vtf
13from viser.extras.colmap import (
14 read_cameras_binary,
15 read_images_binary,
16 read_points3d_binary,
17)
18
19
def main(
    colmap_path: Path = Path(__file__).parent / "../assets/colmap_garden/sparse/0",
    images_path: Path = Path(__file__).parent / "../assets/colmap_garden/images_8",
    downsample_factor: int = 2,
    reorient_scene: bool = True,
) -> None:
    """Visualize a COLMAP sparse reconstruction with viser.

    Args:
        colmap_path: Directory containing cameras.bin, images.bin, and points3D.bin.
        images_path: Directory containing the source images referenced by images.bin.
        downsample_factor: Stride used to subsample each displayed image.
        reorient_scene: If True, set the viewer's up direction to the average
            camera "up" vector so the scene appears upright.
    """
    server = viser.ViserServer()
    server.gui.configure_theme(titlebar_content=None, control_layout="collapsible")

    # Load the colmap info.
    cameras = read_cameras_binary(colmap_path / "cameras.bin")
    images = read_images_binary(colmap_path / "images.bin")
    points3d = read_points3d_binary(colmap_path / "points3D.bin")

    points = np.array([points3d[p_id].xyz for p_id in points3d])
    colors = np.array([points3d[p_id].rgb for p_id in points3d])

    # Let's rotate the scene so the average camera direction is pointing up.
    if reorient_scene:
        average_up = (
            # `qvec` corresponds to T_camera_world; we convert to T_world_camera.
            vtf.SO3(np.array([img.qvec for img in images.values()])).inverse()
            @ np.array([0.0, -1.0, 0.0])  # -y is up in the local frame!
        ).mean(axis=0)
        average_up /= np.linalg.norm(average_up)
        server.scene.set_up_direction((average_up[0], average_up[1], average_up[2]))

    # GUI controls for limiting how much of the reconstruction is shown.
    gui_points = server.gui.add_slider(
        "Max points",
        min=1,
        max=len(points3d),
        step=1,
        initial_value=min(len(points3d), 50_000),
    )
    gui_frames = server.gui.add_slider(
        "Max frames",
        min=1,
        max=len(images),
        step=1,
        initial_value=min(len(images), 50),
    )
    gui_point_size = server.gui.add_slider(
        "Point size", min=0.01, max=0.1, step=0.001, initial_value=0.02
    )

    # Randomly subsample which points are displayed. The slider's maximum never
    # exceeds the point count, so `replace=False` is always valid here.
    point_mask = np.random.choice(points.shape[0], gui_points.value, replace=False)
    point_cloud = server.scene.add_point_cloud(
        name="/colmap/pcd",
        points=points[point_mask],
        colors=colors[point_mask],
        point_size=gui_point_size.value,
    )
    frames: List[viser.FrameHandle] = []

    def visualize_frames() -> None:
        """Send camera frames (axes + image frustums) to the viewer.

        Clears any previously-added frames first, so this can be re-run
        whenever the "Max frames" slider changes.
        """
        # Remove existing image frames.
        for frame in frames:
            frame.remove()
        frames.clear()

        # Interpret the images and cameras.
        img_ids = [im.id for im in images.values()]
        random.shuffle(img_ids)
        img_ids = sorted(img_ids[: gui_frames.value])

        for img_id in tqdm(img_ids):
            img = images[img_id]
            cam = cameras[img.camera_id]

            # Skip images that don't exist.
            image_filename = images_path / img.name
            if not image_filename.exists():
                continue

            # COLMAP stores T_camera_world; invert it to pose the camera in
            # the world frame.
            T_world_camera = vtf.SE3.from_rotation_and_translation(
                vtf.SO3(img.qvec), img.tvec
            ).inverse()
            frame = server.scene.add_frame(
                f"/colmap/frame_{img_id}",
                wxyz=T_world_camera.rotation().wxyz,
                position=T_world_camera.translation(),
                axes_length=0.1,
                axes_radius=0.005,
            )
            frames.append(frame)

            # For pinhole cameras, cam.params will be (fx, fy, cx, cy).
            if cam.model != "PINHOLE":
                # NOTE(review): we still read cam.params[1] as fy below, which
                # is only correct for the PINHOLE model — other models will
                # produce a wrong FOV. Consider skipping or converting instead.
                print(f"Expected pinhole camera, but got {cam.model}")

            H, W = cam.height, cam.width
            fy = cam.params[1]
            image = iio.imread(image_filename)
            image = image[::downsample_factor, ::downsample_factor]
            frustum = server.scene.add_camera_frustum(
                f"/colmap/frame_{img_id}/frustum",
                fov=2 * np.arctan2(H / 2, fy),
                aspect=W / H,
                scale=0.15,
                image=image,
            )

            # Bind `frame` as a default argument to avoid Python's
            # late-binding closure pitfall inside the loop.
            @frustum.on_click
            def _(_, frame=frame) -> None:
                # Fly every connected client's camera to this frame's pose.
                for client in server.get_clients().values():
                    client.camera.wxyz = frame.wxyz
                    client.camera.position = frame.position

    need_update = True

    @gui_points.on_update
    def _(_) -> None:
        # Resample the displayed subset of the point cloud; update points and
        # colors together so clients never see a mismatched pair.
        point_mask = np.random.choice(points.shape[0], gui_points.value, replace=False)
        with server.atomic():
            point_cloud.points = points[point_mask]
            point_cloud.colors = colors[point_mask]

    @gui_frames.on_update
    def _(_) -> None:
        nonlocal need_update
        need_update = True

    @gui_point_size.on_update
    def _(_) -> None:
        point_cloud.point_size = gui_point_size.value

    # Main loop: rebuild the camera frames lazily when flagged, otherwise idle.
    while True:
        if need_update:
            need_update = False
            visualize_frames()

        time.sleep(1e-3)
153
154
# Entry point: expose `main`'s keyword arguments as a CLI via tyro.
if __name__ == "__main__":
    tyro.cli(main)