import sys
sys.path.append("flash3d")

from omegaconf import OmegaConf
import gradio as gr
import spaces  # ZeroGPU helper on Hugging Face Spaces; its @spaces.GPU decorator is not applied in this snippet
import torch
import torchvision.transforms as TT
import torchvision.transforms.functional as TTF
from huggingface_hub import hf_hub_download

from networks.gaussian_predictor import GaussianPredictor
from util.vis3d import save_ply


def main():
    if torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"

    # Download the pretrained Flash3D config and weights from the Hub.
    model_cfg_path = hf_hub_download(repo_id="einsafutdinov/flash3d",
                                     filename="config_re10k_v1.yaml")
    model_path = hf_hub_download(repo_id="einsafutdinov/flash3d",
                                 filename="model_re10k_v1.pth")

    cfg = OmegaConf.load(model_cfg_path)
    model = GaussianPredictor(cfg)
    device = torch.device(device)
    model.load_model(model_path)
    model.to(device)

    pad_border_fn = TT.Pad((cfg.dataset.pad_border_aug, cfg.dataset.pad_border_aug))
    to_tensor = TT.ToTensor()

    def check_input_image(input_image):
        if input_image is None:
            raise gr.Error("No image uploaded!")
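
    # Preprocessing resizes the input so that both sides are multiples of 32
    # (presumably the encoder's total downsampling factor). With dynamic_size
    # enabled, the original aspect ratio is kept and the long side is capped at
    # 20 * 32 = 640 px; otherwise the image is resized to a fixed 256x384.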
    def preprocess(image, dynamic_size=False, padding=True):
        w, h = image.size  # PIL .size is (width, height)
        size = 32
        if dynamic_size:
            # Halve the working resolution until the long side fits in 20 blocks.
            while max(h, w) // size > 20:
                size *= 2
            crop_image = TTF.center_crop(image, (h // size * size, w // size * size))
            resize_image = TTF.resize(crop_image, (h // size * 32, w // size * 32),
                                      interpolation=TT.InterpolationMode.BICUBIC)
            model.cfg.dataset.width, model.cfg.dataset.height = resize_image.size
        else:
            model.cfg.dataset.height, model.cfg.dataset.width = 256, 384
            resize_image = TTF.resize(
                image, (model.cfg.dataset.height, model.cfg.dataset.width),
                interpolation=TT.InterpolationMode.BICUBIC
            )
        if padding:
            input_image = pad_border_fn(resize_image)
            model.cfg.dataset.pad_border_aug = 32
        else:
            input_image = resize_image
            model.cfg.dataset.pad_border_aug = 0
        model.set_backproject()
        return input_image
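
    # The ("color_aug", 0, 0) input key below appears to follow the
    # (image type, frame index, scale) convention of monodepth-style
    # pipelines, which the GaussianPredictor seems to inherit.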
    def reconstruct_and_export(image):
        """
        Passes image through model, outputs reconstruction in form of a dict of tensors.
        """
        image = to_tensor(image).to(device).unsqueeze(0)
        inputs = {
            ("color_aug", 0, 0): image,
        }
        outputs = model(inputs)

        # export reconstruction to ply
        save_ply(outputs,
                 ply_out_path,
                 num_gauss=model.cfg.model.gaussians_per_pixel,
                 h=model.cfg.dataset.height,
                 w=model.cfg.dataset.width,
                 pad=model.cfg.dataset.pad_border_aug)
        return ply_out_path

    ply_out_path = './mesh.ply'
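    # Single shared output file: concurrent requests would overwrite each
    # other, but the queue below (with Gradio's default concurrency limit)
    # effectively serializes them.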
css = """ | |
h1 { | |
text-align: center; | |
display:block; | |
} | |
""" | |

    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            # Flash3D
            **Flash3D** [[project page](https://www.robots.ox.ac.uk/~vgg/research/flash3d/)] is a fast, super-efficient method for 3D scene reconstruction from a single image, trainable on a single GPU in a day.
            The model used in this demo was trained only on the **RealEstate10k dataset, on a single A6000 GPU, within one day**.
            Upload an image of a scene, or click on one of the provided examples, to see how Flash3D does.
            The 3D viewer renders a .ply scene exported from the 3D Gaussians, which is only an approximation.
            """
        )
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                with gr.Row():
                    input_image = gr.Image(
                        label="Input Image",
                        image_mode="RGBA",
                        sources="upload",
                        type="pil",
                        elem_id="content_image",
                    )
                with gr.Row():
                    submit = gr.Button("Generate", elem_id="generate", variant="primary")
                with gr.Row():
                    dynamic_size = gr.Checkbox(True, interactive=True, label="Use the original image ratio")
                    padding = gr.Checkbox(True, interactive=True, label="Add padding to the image")
                with gr.Row(variant="panel"):
                    gr.Examples(
                        examples=[
                            './demo_examples/bedroom_01.png',
                            './demo_examples/kitti_02.png',
                            './demo_examples/kitti_03.png',
                            './demo_examples/re10k_05.jpg',
                            './demo_examples/re10k_06.jpg',
                            './demo_examples/christ_church_cathedral.png',
                            './demo_examples/radcliffe.png',
                            './demo_examples/blenheim_palace_bedroom.png',
                            './demo_examples/blenheim_palace_living.png',
                            './demo_examples/blenheim_palace.JPG',
                        ],
                        inputs=[input_image],
                        cache_examples=False,
                        label="Examples",
                        examples_per_page=20,
                    )
                with gr.Row():
                    processed_image = gr.Image(label="Processed Image", interactive=False)
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Tab("Reconstruction"):
                        output_model = gr.Model3D(
                            height=640,
                            label="Output Model",
                            interactive=False,
                        )
                gr.Markdown(
                    """
                    ## Comments:
                    1. If you run the demo online, the first example you upload should take about 25 seconds (with preprocessing, saving and overhead); subsequent ones take about 14 s (due to the .ply visualisation).
                    2. The 3D viewer shows a .ply mesh extracted from a mix of 3D Gaussians. This is only an approximation, and artefacts might show.
                    3. Known limitations include:
                        - a black dot appearing on the model from some viewpoints;
                        - while the multiple Gaussians fill in reasonable content for the invisible parts, the visual quality there is still blurry.
                    4. Flash3D achieves state-of-the-art results when trained and tested on RealEstate10k, and is **much** cheaper to train and run.
                    5. When transferred to unseen datasets like NYU, it outperforms competitors by a large margin.
                    6. More impressively, when transferred to KITTI, Flash3D achieves better PSNR than methods trained specifically on that dataset.
                    ## How does it work?
                    Given a single image I as input, Flash3D first estimates the metric depth D using a frozen off-the-shelf network.
                    Then, a ResNet50-like encoder-decoder network predicts the shape and appearance parameters P of K layers of Gaussians for every pixel u,
                    allowing unobserved and occluded surfaces to be modelled.
                    From these predicted components, the depth of each layer is obtained by adding the predicted (positive) offsets δi to the predicted monocular depth D,
                    from which the mean vector of every layer of Gaussians is computed.
                    This strategy ensures that the layers are depth-ordered, encouraging the network to model occluded surfaces.
                    For more results see the [project page](https://www.robots.ox.ac.uk/~vgg/research/flash3d/).
                    """
                )
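
        # Chain the steps: each .success() handler runs only if the previous
        # step finished without raising, so the gr.Error raised by
        # check_input_image stops the pipeline before preprocessing.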
        submit.click(fn=check_input_image, inputs=[input_image]).success(
            fn=preprocess,
            inputs=[input_image, dynamic_size, padding],
            outputs=[processed_image],
        ).success(
            fn=reconstruct_and_export,
            inputs=[processed_image],
            outputs=[output_model],
        )

    demo.queue(max_size=1)
    demo.launch()
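
# Hugging Face Spaces runs this file as the app entry point; it assumes the
# flash3d source tree sits next to this file (see sys.path.append above).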

if __name__ == "__main__":
    main()