Commit dbccd94
Parent(s): 533154e

add point filter; check if we can upload gradio_cached_examples
- .gitattributes +1 -0
- README.md +1 -1
- app.py +98 -62
- gradio_cached_examples/17/Reconstruction/945304e792b0f852dd99/glbscene.glb +0 -0
- gradio_cached_examples/17/Reconstruction/acbcb0d82e2838cc056a/glbscene.glb +0 -0
- gradio_cached_examples/17/Reconstruction/bbd7de840562d63202f0/glbscene.glb +0 -0
- gradio_cached_examples/17/Reconstruction/eff9890930c07374da16/glbscene.glb +0 -0
- gradio_cached_examples/17/log.csv +5 -0
- images_to_videos.py +1 -1
- requirements.txt +1 -0
- vggsfm_code/examples/british_museum/{images_10 → images}/069.jpg +0 -0
- vggsfm_code/examples/british_museum/{images_10 → images}/134.jpg +0 -0
- vggsfm_code/examples/british_museum/{images_10 → images}/192.jpg +0 -0
- vggsfm_code/examples/british_museum/{images_10 → images}/336.jpg +0 -0
- vggsfm_code/examples/british_museum/{images_10 → images}/515.jpg +0 -0
- vggsfm_code/examples/british_museum/images_10/210.jpg +0 -0
- vggsfm_code/examples/british_museum/images_10/599.jpg +0 -0
- vggsfm_code/examples/british_museum/images_10/632.jpg +0 -0
- vggsfm_code/examples/british_museum/images_10/767.jpg +0 -0
- vggsfm_code/examples/british_museum/images_10/886.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/001.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/002.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/003.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/004.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/005.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/006.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/007.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/008.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/009.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/010.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/011.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/012.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/013.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/014.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/015.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/016.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/017.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/018.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/019.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/020.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/021.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/022.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/023.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/024.jpg +0 -0
- vggsfm_code/examples/in2n_face/images/025.jpg +0 -0
- vggsfm_code/examples/videos/british_museum_video.mp4 +2 -2
- vggsfm_code/examples/videos/in2n_face_video.mp4 +3 -0
- viz_utils/__pycache__/viz_fn.cpython-310.pyc +0 -0
- viz_utils/viz_fn.py +40 -0
.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.JPG filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 vggsfm_code/examples/ filter=lfs diff=lfs merge=lfs -text
+gradio_cached_examples/ filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: VGGSfM
-emoji:
+emoji: 🏛️
 colorFrom: yellow
 colorTo: blue
 sdk: gradio
app.py CHANGED
@@ -14,12 +14,13 @@ from datetime import datetime
 
 from vggsfm_code.hf_demo import demo_fn
 from omegaconf import DictConfig, OmegaConf
-from viz_utils.viz_fn import add_camera
+from viz_utils.viz_fn import add_camera, apply_density_filter_np
 import glob
 #
 from scipy.spatial.transform import Rotation
 import PIL
 import gc
+import open3d as o3d
 
 # import spaces
 
@@ -30,15 +31,13 @@ def vggsfm_demo(
     query_frame_num,
     max_query_pts=4096,
 ):
+
+    import time
+    start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()
 
-
-    if not isinstance(input_video, str):
-        input_video = input_video["video"]["path"]
-
-    cfg_file = "vggsfm_code/cfgs/demo.yaml"
-    cfg = OmegaConf.load(cfg_file)
+    debug = False
 
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
 
@@ -52,58 +51,72 @@ def vggsfm_demo(
     target_dir_images = target_dir + "/images"
     os.makedirs(target_dir_images)
 
-    if input_image is not None:
-        if len(input_image)<3:
-            return None, "Please input at least three frames"
-
-        input_image = sorted(input_image)
-        input_image = input_image[:max_input_image]
-
-        # Copy files to the new directory
-        for file_name in input_image:
-            shutil.copy(file_name, target_dir_images)
-    elif input_video is not None:
-        vs = cv2.VideoCapture(input_video)
-
-        fps = vs.get(cv2.CAP_PROP_FPS)
-
-        frame_rate = 1
-        frame_interval = int(fps * frame_rate)
-
-        video_frame_num = 0
-        count = 0
-
-        while video_frame_num<=max_input_image:
-            (gotit, frame) = vs.read()
-            count +=1
-
-            if not gotit:
-                break
-
-            if count % frame_interval == 0:
-                cv2.imwrite(target_dir_images+"/"+f"{video_frame_num:06}.png", frame)
-                video_frame_num+=1
-
-        if video_frame_num<3:
-            return None, "Please input at least three frames"
-    else:
-        return None, "Input format incorrect"
-
-    cfg.query_frame_num = query_frame_num
-    cfg.max_query_pts = max_query_pts
-    print(f"Files have been copied to {target_dir_images}")
-    cfg.SCENE_DIR = target_dir
-
-    # try:
-    predictions = demo_fn(cfg)
-    # except:
-    #     return None, "Something seems to be incorrect. Please verify that your inputs are formatted correctly. If the issue persists, kindly create a GitHub issue for further assistance."
+    if debug:
+        predictions = torch.load("predictions_scene2.pth")
+    else:
+
+        if input_video is not None:
+            if not isinstance(input_video, str):
+                input_video = input_video["video"]["path"]
+
+        cfg_file = "vggsfm_code/cfgs/demo.yaml"
+        cfg = OmegaConf.load(cfg_file)
+
+        if input_image is not None:
+            if len(input_image)<3:
+                return None, "Please input at least three frames"
+
+            input_image = sorted(input_image)
+            input_image = input_image[:max_input_image]
+
+            # Copy files to the new directory
+            for file_name in input_image:
+                shutil.copy(file_name, target_dir_images)
+        elif input_video is not None:
+            vs = cv2.VideoCapture(input_video)
+
+            fps = vs.get(cv2.CAP_PROP_FPS)
+
+            frame_rate = 1
+            frame_interval = int(fps * frame_rate)
+
+            video_frame_num = 0
+            count = 0
+
+            while video_frame_num<=max_input_image:
+                (gotit, frame) = vs.read()
+                count +=1
+
+                if not gotit:
+                    break
+
+                if count % frame_interval == 0:
+                    cv2.imwrite(target_dir_images+"/"+f"{video_frame_num:06}.png", frame)
+                    video_frame_num+=1
+
+            if video_frame_num<3:
+                return None, "Please input at least three frames"
+        else:
+            return None, "Input format incorrect"
+
+        cfg.query_frame_num = query_frame_num
+        cfg.max_query_pts = max_query_pts
+        print(f"Files have been copied to {target_dir_images}")
+        cfg.SCENE_DIR = target_dir
+
+        # try:
+        predictions = demo_fn(cfg)
+        # except:
+        #     return None, "Something seems to be incorrect. Please verify that your inputs are formatted correctly. If the issue persists, kindly create a GitHub issue for further assistance."
 
     glbscene = vggsfm_predictions_to_glb(predictions)
 
     glbfile = target_dir + "/glbscene.glb"
-    glbscene.export(file_obj=glbfile)
+    glbscene.export(file_obj=glbfile)
+    # glbscene.export(file_obj=glbfile, line_settings= {'point_size': 20})
+
 
     del predictions
     gc.collect()
@@ -111,12 +124,18 @@ def vggsfm_demo(
 
     print(input_image)
     print(input_video)
+    end_time = time.time()
+    execution_time = end_time - start_time
+    print(f"Execution time: {execution_time} seconds")
+
     return glbfile, "Success"
 
 
 
 
-def vggsfm_predictions_to_glb(predictions):
+def vggsfm_predictions_to_glb(predictions, sphere=False):
+    # del predictions['reconstruction']
+    # torch.save(predictions, "predictions_scene2.pth")
     # learned from https://github.com/naver/dust3r/blob/main/dust3r/viz.py
     points3D = predictions["points3D"].cpu().numpy()
     points3D_rgb = predictions["points3D_rgb"].cpu().numpy()
@@ -124,13 +143,34 @@ def vggsfm_predictions_to_glb(predictions):
 
     extrinsics_opencv = predictions["extrinsics_opencv"].cpu().numpy()
     intrinsics_opencv = predictions["intrinsics_opencv"].cpu().numpy()
+
+
     raw_image_paths = predictions["raw_image_paths"]
     images = predictions["images"].permute(0,2,3,1).cpu().numpy()
     images = (images*255).astype(np.uint8)
 
     glbscene = trimesh.Scene()
-    point_cloud = trimesh.PointCloud(points3D, colors=points3D_rgb)
-    glbscene.add_geometry(point_cloud)
+
+    if True:
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(points3D)
+        pcd.colors = o3d.utility.Vector3dVector(points3D_rgb)
+
+        cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=1.0)
+        filtered_pcd = pcd.select_by_index(ind)
+
+        print(f"Filter out {len(points3D) - len(filtered_pcd.points)} 3D points")
+        points3D = np.asarray(filtered_pcd.points)
+        points3D_rgb = np.asarray(filtered_pcd.colors)
+
+
+    if sphere:
+        # TOO SLOW
+        print("testing sphere")
+        # point_size = 0.02
+    else:
+        point_cloud = trimesh.PointCloud(points3D, colors=points3D_rgb)
+        glbscene.add_geometry(point_cloud)
 
 
     camera_edge_colors = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (255, 0, 255), (255, 204, 0), (0, 204, 204),
@@ -160,27 +200,21 @@ def vggsfm_predictions_to_glb(predictions):
     glbscene.apply_transform(np.linalg.inv(np.linalg.inv(extrinsics_opencv_4x4[0]) @ opengl_mat @ rot))
 
     # Calculate the bounding box center and apply the translation
-    bounding_box = glbscene.bounds
-    center = (bounding_box[0] + bounding_box[1]) / 2
-    translation = np.eye(4)
-    translation[:3, 3] = -center
+    # bounding_box = glbscene.bounds
+    # center = (bounding_box[0] + bounding_box[1]) / 2
+    # translation = np.eye(4)
+    # translation[:3, 3] = -center
 
-    glbscene.apply_transform(translation)
+    # glbscene.apply_transform(translation)
     # glbfile = "glbscene.glb"
     # glbscene.export(file_obj=glbfile)
     return glbscene
 
 apple_video = "vggsfm_code/examples/videos/apple_video.mp4"
-# os.path.join(os.path.dirname(__file__), "apple_video.mp4")
 british_museum_video = "vggsfm_code/examples/videos/british_museum_video.mp4"
-
-# os.path.join(os.path.dirname(__file__), "british_museum_video.mp4")
 cake_video = "vggsfm_code/examples/videos/cake_video.mp4"
-
 bonsai_video = "vggsfm_code/examples/videos/bonsai_video.mp4"
-
-# os.path.join(os.path.dirname(__file__), "cake_video.mp4")
-
+face_video = "vggsfm_code/examples/videos/in2n_face_video.mp4"
 
 
 apple_images = glob.glob(f'vggsfm_code/examples/apple/images/*')
@@ -188,6 +222,7 @@ bonsai_images = glob.glob(f'vggsfm_code/examples/bonsai/images/*')
 cake_images = glob.glob(f'vggsfm_code/examples/cake/images/*')
 british_museum_images = glob.glob(f'vggsfm_code/examples/british_museum/images/*')
 
+face_images = glob.glob(f'vggsfm_code/examples/in2n_face/images/*')
 
 
 
@@ -221,7 +256,7 @@ with gr.Blocks() as demo:
                     info="More query points usually lead to denser reconstruction at lower speeds.")
 
         with gr.Column(scale=3):
-            reconstruction_output = gr.Model3D(label="Reconstruction", height=520)
+            reconstruction_output = gr.Model3D(label="Reconstruction", height=520, zoom_speed=1, pan_speed=1)
            log_output = gr.Textbox(label="Log")
 
     with gr.Row():
@@ -232,6 +267,7 @@
 
 
     examples = [
+        [face_video, face_images, 4, 2048],
         [british_museum_video, british_museum_images, 1, 4096],
         [apple_video, apple_images, 6, 2048],
         [bonsai_video, bonsai_images, 3, 2048],
gradio_cached_examples/17/Reconstruction/945304e792b0f852dd99/glbscene.glb ADDED
Binary file (62.3 kB)

gradio_cached_examples/17/Reconstruction/acbcb0d82e2838cc056a/glbscene.glb ADDED
Binary file (133 kB)

gradio_cached_examples/17/Reconstruction/bbd7de840562d63202f0/glbscene.glb ADDED
Binary file (130 kB)

gradio_cached_examples/17/Reconstruction/eff9890930c07374da16/glbscene.glb ADDED
Binary file (77.2 kB)
gradio_cached_examples/17/log.csv ADDED
@@ -0,0 +1,5 @@
+Reconstruction,Log,flag,username,timestamp
+"{""path"": ""gradio_cached_examples/17/Reconstruction/bbd7de840562d63202f0/glbscene.glb"", ""url"": ""/file=/tmp/gradio/fca5f8328739501143569c8f61c49f859d6050aa/glbscene.glb"", ""size"": null, ""orig_name"": ""glbscene.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",Success,,,2024-06-27 20:41:21.350514
+"{""path"": ""gradio_cached_examples/17/Reconstruction/945304e792b0f852dd99/glbscene.glb"", ""url"": ""/file=/tmp/gradio/4cc4bf183538e927e644e048332dcdaff12da3cf/glbscene.glb"", ""size"": null, ""orig_name"": ""glbscene.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",Success,,,2024-06-27 20:41:41.130464
+"{""path"": ""gradio_cached_examples/17/Reconstruction/acbcb0d82e2838cc056a/glbscene.glb"", ""url"": ""/file=/tmp/gradio/ba332ab61cbd09dd2e7c53f11d876cb561b990cd/glbscene.glb"", ""size"": null, ""orig_name"": ""glbscene.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",Success,,,2024-06-27 20:42:49.325264
+"{""path"": ""gradio_cached_examples/17/Reconstruction/eff9890930c07374da16/glbscene.glb"", ""url"": ""/file=/tmp/gradio/803adcd2ead20dcc56157d3e0fe98a7b7cd12bd7/glbscene.glb"", ""size"": null, ""orig_name"": ""glbscene.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",Success,,,2024-06-27 20:43:17.587081
images_to_videos.py CHANGED
@@ -2,7 +2,7 @@ import cv2
 import os
 
 # Parameters
-name = "
+name = "in2n_face"
 folder_path = f'vggsfm_code/examples/{name}/images' # Update with the path to your images
 video_path = f'vggsfm_code/examples/videos/{name}_video.mp4'
 fps = 1 # frames per second
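
Only the parameter block of images_to_videos.py appears in this diff; the stitching code below it is unchanged and not shown. As a rough sketch (assumed, not taken from the commit), a script with these parameters would typically assemble the frames with cv2.VideoWriter:

import cv2
import os

name = "in2n_face"
folder_path = f'vggsfm_code/examples/{name}/images'
video_path = f'vggsfm_code/examples/videos/{name}_video.mp4'
fps = 1  # frames per second

# Write the sorted frames into an mp4 at the requested frame rate.
frames = sorted(os.listdir(folder_path))
first = cv2.imread(os.path.join(folder_path, frames[0]))
height, width = first.shape[:2]

writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
for fname in frames:
    img = cv2.imread(os.path.join(folder_path, fname))
    writer.write(cv2.resize(img, (width, height)))  # guard against size mismatches
writer.release()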
requirements.txt CHANGED
@@ -10,3 +10,4 @@ numpy==1.26.3
 pycolmap==0.6.1
 https://huggingface.co/facebook/VGGSfM/resolve/main/poselib-2.0.2-cp310-cp310-linux_x86_64.whl
 trimesh
+open3d
vggsfm_code/examples/british_museum/{images_10 → images}/069.jpg RENAMED
File without changes

vggsfm_code/examples/british_museum/{images_10 → images}/134.jpg RENAMED
File without changes

vggsfm_code/examples/british_museum/{images_10 → images}/192.jpg RENAMED
File without changes

vggsfm_code/examples/british_museum/{images_10 → images}/336.jpg RENAMED
File without changes

vggsfm_code/examples/british_museum/{images_10 → images}/515.jpg RENAMED
File without changes
vggsfm_code/examples/british_museum/images_10/210.jpg DELETED
Binary file (394 kB)

vggsfm_code/examples/british_museum/images_10/599.jpg DELETED
Binary file (423 kB)

vggsfm_code/examples/british_museum/images_10/632.jpg DELETED
Binary file (561 kB)

vggsfm_code/examples/british_museum/images_10/767.jpg DELETED
Binary file (355 kB)

vggsfm_code/examples/british_museum/images_10/886.jpg DELETED
Binary file (339 kB)
vggsfm_code/examples/in2n_face/images/001.jpg ADDED
vggsfm_code/examples/in2n_face/images/002.jpg ADDED
vggsfm_code/examples/in2n_face/images/003.jpg ADDED
vggsfm_code/examples/in2n_face/images/004.jpg ADDED
vggsfm_code/examples/in2n_face/images/005.jpg ADDED
vggsfm_code/examples/in2n_face/images/006.jpg ADDED
vggsfm_code/examples/in2n_face/images/007.jpg ADDED
vggsfm_code/examples/in2n_face/images/008.jpg ADDED
vggsfm_code/examples/in2n_face/images/009.jpg ADDED
vggsfm_code/examples/in2n_face/images/010.jpg ADDED
vggsfm_code/examples/in2n_face/images/011.jpg ADDED
vggsfm_code/examples/in2n_face/images/012.jpg ADDED
vggsfm_code/examples/in2n_face/images/013.jpg ADDED
vggsfm_code/examples/in2n_face/images/014.jpg ADDED
vggsfm_code/examples/in2n_face/images/015.jpg ADDED
vggsfm_code/examples/in2n_face/images/016.jpg ADDED
vggsfm_code/examples/in2n_face/images/017.jpg ADDED
vggsfm_code/examples/in2n_face/images/018.jpg ADDED
vggsfm_code/examples/in2n_face/images/019.jpg ADDED
vggsfm_code/examples/in2n_face/images/020.jpg ADDED
vggsfm_code/examples/in2n_face/images/021.jpg ADDED
vggsfm_code/examples/in2n_face/images/022.jpg ADDED
vggsfm_code/examples/in2n_face/images/023.jpg ADDED
vggsfm_code/examples/in2n_face/images/024.jpg ADDED
vggsfm_code/examples/in2n_face/images/025.jpg ADDED
vggsfm_code/examples/videos/british_museum_video.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:39bbcf545761bbeeb4e3cba24622783e93d608fcfe4217ca4575b4bf81178166
+size 929009
vggsfm_code/examples/videos/in2n_face_video.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:746e08978e78446b494e70fc04579beaab3210f6e39b3a72a385bd7d22a112ff
+size 2781740
viz_utils/__pycache__/viz_fn.cpython-310.pyc CHANGED
Binary files a/viz_utils/__pycache__/viz_fn.cpython-310.pyc and b/viz_utils/__pycache__/viz_fn.cpython-310.pyc differ
viz_utils/viz_fn.py CHANGED
@@ -19,6 +19,46 @@ from datetime import datetime
 from scipy.spatial.transform import Rotation
 import PIL
 
+from scipy.spatial import cKDTree
+
+
+
+def get_density_np(pcl, K=0.005):
+    if isinstance(K, float):
+        K = max(int(K * pcl.shape[0]), 1)
+
+    tree = cKDTree(pcl)
+    dists, _ = tree.query(pcl, k=K+1) # K+1 because the point itself is included
+
+    dists = dists[:, 1:] # Remove the zero distance to itself
+    D = np.sqrt(dists).sum(axis=1)
+
+    return D
+
+def apply_density_filter_np(pts, feats=None, density_filter=0.9, density_K=100):
+    """
+    :param pts: ndarray of shape (N, 3) representing the point cloud.
+    :param feats: ndarray of corresponding features for the point cloud.
+    :param density_filter: Float, the percentage of points to keep based on density.
+    :param density_K: Int, number of nearest neighbors to consider for density calculation.
+    :return: Filtered points and their corresponding features.
+    """
+    # Calculate densities
+    D = get_density_np(pts, K=density_K)
+
+    # Apply the density filter
+    topk_k = max(int((1 - density_filter) * pts.shape[0]), 1)
+    val = np.partition(D, topk_k)[topk_k]
+    ok = (D <= val)
+
+    # Filter points and features
+    filtered_pts = pts[ok]
+    if feats is not None:
+        filtered_feats = feats[ok]
+    else:
+        filtered_feats = feats
+    return filtered_pts, filtered_feats
+
 
 def add_camera(scene, pose_c2w, edge_color, image=None,
                focal=None, imsize=None,