Spaces:
Running
Running
- Dockerfile +69 -0
- README.md +3 -3
- app/__init__.py +0 -0
- app/utils.py +84 -0
- main.py +76 -0
- requirements.txt +6 -0
Dockerfile
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM python:3.11.12-slim

# Writable cache locations — the container runs as a non-root user on
# HF Spaces, so caches must live under /tmp.
ENV NUMBA_CACHE_DIR=/tmp
ENV HF_HOME=/tmp/huggingface
ENV TRANSFORMERS_CACHE=/tmp/huggingface/transformers
ENV HF_HUB_CACHE=/tmp/huggingface/hub

WORKDIR /app

# World-writable scratch dirs for per-request inputs/outputs (see main.py).
RUN mkdir -p /app/tmp && chmod -R 777 /app/tmp
RUN mkdir -p /app/tmp/huggingface/token && chmod -R 777 /app/tmp/huggingface/token

# Install git, build tools, and the shared libraries OpenCV/rendering need.
# NOTE(review): python:3.11-slim is Debian bookworm, where libgl1-mesa-glx
# was dropped and replaced by libgl1 — the old package name fails to install.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    build-essential \
    cmake \
    libgl1 \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender1 \
    && rm -rf /var/lib/apt/lists/*

# Clone the model repository.
# NOTE(review): unpinned clone — consider checking out a fixed commit for
# reproducible builds.
RUN git clone https://github.com/Stability-AI/stable-fast-3d.git

# Install PyTorch first: the local C++/CUDA extension packages below need it
# at build time.
RUN pip install --no-cache-dir torch==2.6.0

# Install the repo's Python dependencies, excluding its local path packages
# (lines starting with "./"), which are built explicitly afterwards.
RUN grep -v "^\./" stable-fast-3d/requirements.txt > modified_requirements.txt && \
    pip install --no-cache-dir -r modified_requirements.txt

# Build and install the local extension packages.
WORKDIR /app/stable-fast-3d/texture_baker
RUN pip install -e .
WORKDIR /app/stable-fast-3d/uv_unwrapper
RUN pip install -e .
WORKDIR /app

# API server dependencies.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# NOTE(review): this deliberately overrides huggingface-hub==0.17.3 pinned in
# requirements.txt — keep the two files in sync to avoid confusion.
RUN pip install --no-cache-dir transformers==4.38.1 huggingface_hub==0.20.3

# Run as a non-root user (HF Spaces convention).
RUN useradd -m -u 1000 user
USER user

# Copy the FastAPI application code.
COPY --chown=user app/ app/
COPY --chown=user main.py .

# Port the app will listen on.
EXPOSE 7860

# Command to run the application.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
@@ -1,8 +1,8 @@
|
|
1 |
---
|
2 |
title: 3D Model AI
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
---
|
|
|
1 |
---
|
2 |
title: 3D Model AI
|
3 |
+
emoji: 🌍
|
4 |
+
colorFrom: gray
|
5 |
+
colorTo: green
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
---
|
app/__init__.py
ADDED
File without changes
|
app/utils.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import subprocess
|
2 |
+
import os
|
3 |
+
from pathlib import Path
|
4 |
+
import glob
|
5 |
+
import logging
|
6 |
+
|
7 |
+
# Module-wide logger configuration.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
10 |
+
|
11 |
+
def run_inference(image_path: str, output_dir: str) -> str:
    """
    Run Stable Fast 3D inference on the input image.

    Invokes the upstream ``run.py`` script from the cloned
    stable-fast-3d repository as a subprocess, then locates the GLB
    file it produced under ``output_dir``.

    Args:
        image_path: Path to the input image.
        output_dir: Directory where the model writes its outputs.

    Returns:
        Path to the generated GLB file (first in sorted order, for
        determinism when several are produced).

    Raises:
        RuntimeError: If the subprocess exits non-zero or no GLB file
            is generated.
    """
    # Location the Dockerfile clones the upstream repository into.
    repo_path = Path("/app/stable-fast-3d")

    logger.info("Running inference on image: %s", image_path)
    logger.info("Output directory: %s", output_dir)

    # Upstream CLI flags — note it genuinely mixes dash and underscore
    # styles (--texture-resolution vs --remesh_option); do not "fix" them.
    command = [
        "python", "run.py",
        image_path,
        "--output-dir", output_dir,
        "--pretrained-model", "stabilityai/stable-fast-3d",
        "--texture-resolution", "2048",
        "--remesh_option", "quad",
    ]
    logger.info("Running command: %s", " ".join(command))

    try:
        # Point every HuggingFace/numba cache at writable /tmp paths:
        # the container runs as a non-root user with a read-only home.
        hf_cache_path = "/tmp/huggingface_cache"
        os.makedirs(hf_cache_path, exist_ok=True)
        env = os.environ.copy()
        env["HF_HOME"] = "/tmp/huggingface"
        env["TRANSFORMERS_CACHE"] = hf_cache_path
        env["HF_HUB_CACHE"] = hf_cache_path
        env["NUMBA_CACHE_DIR"] = "/tmp"

        result = subprocess.run(
            command,
            cwd=repo_path,
            check=True,
            capture_output=True,
            text=True,
            env=env,  # pass the augmented environment to the child
        )
        logger.info("Subprocess STDOUT:\n%s", result.stdout)
        logger.info("Subprocess STDERR:\n%s", result.stderr)
    except subprocess.CalledProcessError as e:
        logger.error("Subprocess failed with exit code %s", e.returncode)
        logger.error("STDOUT:\n%s", e.stdout)
        logger.error("STDERR:\n%s", e.stderr)
        # RuntimeError is a subclass of Exception, so existing callers that
        # catch Exception still work; the cause is chained for debugging.
        raise RuntimeError(f"Subprocess failed: {e.stderr}") from e

    # The model typically writes into a subdirectory of output_dir; search
    # recursively for any GLB it produced. Sorting makes the pick
    # deterministic (glob order is filesystem-dependent).
    glb_files = sorted(
        glob.glob(os.path.join(output_dir, "**", "*.glb"), recursive=True)
    )

    if not glb_files:
        logger.error("No GLB files found in %s", output_dir)
        raise RuntimeError("No GLB file was generated")

    logger.info("Found GLB file: %s", glb_files[0])
    return glb_files[0]
main.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, UploadFile, File, HTTPException, Query
|
2 |
+
from fastapi.responses import FileResponse
|
3 |
+
import os
|
4 |
+
import shutil
|
5 |
+
import uuid
|
6 |
+
import requests
|
7 |
+
from typing import Optional
|
8 |
+
from app.utils import run_inference
|
9 |
+
|
10 |
+
from huggingface_hub import login
|
11 |
+
|
12 |
+
# Authenticate with the HuggingFace Hub so gated model weights can be
# downloaded. The token comes from the HF_TOKEN secret — never print or
# log its value (the original code leaked it to stdout).
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    # Without a token only public models are reachable; don't crash at import.
    print("HF_TOKEN is not set; continuing without HuggingFace authentication")


app = FastAPI(title="Stable Fast 3D API")
|
18 |
+
|
19 |
+
@app.get("/")
async def root():
    """Landing endpoint pointing callers at the /generate-3d endpoint."""
    welcome = "Welcome to Stable Fast 3D API. Use /generate-3d endpoint to convert 2D images to 3D models."
    return {"message": welcome}
|
22 |
+
|
23 |
+
@app.post("/generate-3d/")
async def generate_3d_model_upload(image: UploadFile = File(...)):
    """Generate 3D model from uploaded image file"""
    # Delegate to the shared handler with only the upload populated.
    result = await process_image(image=image)
    return result
|
27 |
+
|
28 |
+
@app.get("/generate-3d/")
async def generate_3d_model_url(image_url: str = Query(..., description="URL of the image to convert to 3D")):
    """Generate 3D model from image URL"""
    # Delegate to the shared handler with only the URL populated.
    result = await process_image(image_url=image_url)
    return result
|
32 |
+
|
33 |
+
async def process_image(image: Optional[UploadFile] = None, image_url: Optional[str] = None):
    """
    Core handler shared by the upload and URL endpoints.

    Saves the input image to a unique temp path, runs Stable Fast 3D
    inference on it, and returns the resulting GLB file as a download.

    Args:
        image: Uploaded image file (multipart), if provided.
        image_url: URL of an image to download, if provided.

    Raises:
        HTTPException: 400 for missing/bad input, 500 for inference failures.
    """
    # Unique workspace per request so concurrent calls cannot collide.
    temp_id = str(uuid.uuid4())
    input_path = f"/app/tmp/{temp_id}.png"
    output_dir = f"/app/tmp/{temp_id}_output"
    os.makedirs(output_dir, exist_ok=True)

    try:
        # Obtain the image either from the uploaded file or from a URL.
        if image:
            with open(input_path, "wb") as f:
                shutil.copyfileobj(image.file, f)
        elif image_url:
            # Timeout prevents a hung remote server from tying up the worker.
            response = requests.get(image_url, stream=True, timeout=30)
            if response.status_code != 200:
                raise HTTPException(status_code=400, detail="Could not download image from URL")

            with open(input_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
        else:
            raise HTTPException(status_code=400, detail="Either image file or image URL must be provided")

        # Run the inference
        glb_path = run_inference(input_path, output_dir)

        if not os.path.exists(glb_path):
            raise HTTPException(status_code=500, detail="Failed to generate 3D model")

        # NOTE(review): output_dir is intentionally NOT removed here — the GLB
        # is streamed from disk after this function returns. Consider a
        # background cleanup task so /app/tmp does not grow unboundedly.
        return FileResponse(
            path=glb_path,
            media_type="model/gltf-binary",
            filename="model.glb",
            headers={"Content-Disposition": "attachment; filename=model.glb"},
        )

    except HTTPException:
        # Preserve deliberate HTTP status codes (the 400s above); the broad
        # handler below would otherwise collapse them into generic 500s.
        raise

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}")

    finally:
        # The input image is no longer needed once inference has run (or failed).
        if os.path.exists(input_path):
            os.remove(input_path)
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi==0.103.1
|
2 |
+
uvicorn==0.23.2
|
3 |
+
python-multipart==0.0.6
|
4 |
+
requests==2.31.0
|
5 |
+
pillow==10.0.0
|
6 |
+
huggingface-hub==0.17.3
|