import inspect
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL
import PIL.Image
import torch
import trimesh
from diffusers.image_processor import PipelineImageInput
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import logging
from diffusers.utils.torch_utils import randn_tensor
from transformers import (
    BitImageProcessor,
    CLIPTokenizer,
    CLIPTextModelWithProjection,
    Dinov2Model,
)

from ..inference_utils import hierarchical_extract_geometry, flash_extract_geometry
from ..models.autoencoders import TripoSGVAEModel
from ..models.transformers import TripoSGDiTModel
from .pipeline_triposg_output import TripoSGPipelineOutput
from .pipeline_utils import TransformerDiffusionMixin

logger = logging.get_logger(__name__)


def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
""" |
|
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles |
|
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. |
|
|
|
Args: |
|
scheduler (`SchedulerMixin`): |
|
The scheduler to get timesteps from. |
|
num_inference_steps (`int`): |
|
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` |
|
must be `None`. |
|
device (`str` or `torch.device`, *optional*): |
|
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. |
|
timesteps (`List[int]`, *optional*): |
|
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, |
|
`num_inference_steps` and `sigmas` must be `None`. |
|
sigmas (`List[float]`, *optional*): |
|
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, |
|
`num_inference_steps` and `timesteps` must be `None`. |
|
|
|
Returns: |
|
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the |
|
second element is the number of inference steps. |
|
""" |
|
if timesteps is not None and sigmas is not None: |
|
raise ValueError( |
|
"Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values" |
|
) |
|
if timesteps is not None: |
|
accepts_timesteps = "timesteps" in set( |
|
inspect.signature(scheduler.set_timesteps).parameters.keys() |
|
) |
|
if not accepts_timesteps: |
|
raise ValueError( |
|
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" |
|
f" timestep schedules. Please check whether you are using the correct scheduler." |
|
) |
|
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) |
|
timesteps = scheduler.timesteps |
|
num_inference_steps = len(timesteps) |
|
elif sigmas is not None: |
|
accept_sigmas = "sigmas" in set( |
|
inspect.signature(scheduler.set_timesteps).parameters.keys() |
|
) |
|
if not accept_sigmas: |
|
raise ValueError( |
|
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" |
|
f" sigmas schedules. Please check whether you are using the correct scheduler." |
|
) |
|
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) |
|
timesteps = scheduler.timesteps |
|
num_inference_steps = len(timesteps) |
|
else: |
|
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) |
|
timesteps = scheduler.timesteps |
|
return timesteps, num_inference_steps |


class TripoSGScribblePipeline(DiffusionPipeline, TransformerDiffusionMixin):
    """
    Pipeline for (scribble and text)-to-3D generation.
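
    Example (a minimal usage sketch; the checkpoint path and input file names below are
    placeholders, not confirmed release artifacts):

    ```py
    >>> import PIL.Image
    >>> pipe = TripoSGScribblePipeline.from_pretrained("path/to/triposg-scribble")
    >>> pipe.to("cuda")
    >>> scribble = PIL.Image.open("scribble.png").convert("RGB")
    >>> output = pipe(image=scribble, prompt="a wooden chair", num_inference_steps=50)
    >>> output.meshes[0].export("chair.glb")
    ```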
    """

    def __init__(
        self,
        vae: TripoSGVAEModel,
        transformer: TripoSGDiTModel,
        scheduler: FlowMatchEulerDiscreteScheduler,
        tokenizer: CLIPTokenizer,
        text_encoder: CLIPTextModelWithProjection,
        image_encoder_dinov2: Dinov2Model,
        feature_extractor_dinov2: BitImageProcessor,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            transformer=transformer,
            scheduler=scheduler,
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            image_encoder_dinov2=image_encoder_dinov2,
            feature_extractor_dinov2=feature_extractor_dinov2,
        )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    def encode_text(self, prompt, device, num_shapes_per_prompt):
        """Encode `prompt` with the CLIP text encoder; the unconditional embedding is an all-zero tensor."""
        input_ids = self.tokenizer(
            [prompt],
            max_length=self.tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )["input_ids"].to(device)
        text_embeds = self.text_encoder(input_ids).last_hidden_state
        text_embeds = text_embeds.repeat_interleave(num_shapes_per_prompt, dim=0)
        uncond_text_embeds = torch.zeros_like(text_embeds)
        return text_embeds, uncond_text_embeds

    def encode_image(self, image, device, num_shapes_per_prompt):
        """Encode the scribble image with DINOv2; the unconditional embedding is an all-zero tensor."""
        dtype = next(self.image_encoder_dinov2.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor_dinov2(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        image_embeds = self.image_encoder_dinov2(image).last_hidden_state
        image_embeds = image_embeds.repeat_interleave(num_shapes_per_prompt, dim=0)
        uncond_image_embeds = torch.zeros_like(image_embeds)

        return image_embeds, uncond_image_embeds

    def prepare_latents(
        self,
        batch_size,
        num_tokens,
        num_channels_latents,
        dtype,
        device,
        generator,
        latents: Optional[torch.Tensor] = None,
    ):
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        shape = (batch_size, num_tokens, num_channels_latents)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: str,
        num_tokens: int = 512,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 7.0,
        num_shapes_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        bounds: Union[Tuple[float, ...], List[float], float] = (-1.005, -1.005, -1.005, 1.005, 1.005, 1.005),
        dense_octree_depth: int = 9,
        hierarchical_octree_depth: int = 9,
        flash_octree_depth: int = 9,
        use_flash_decoder: bool = True,
        return_dict: bool = True,
    ):
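        """
        Generate 3D shapes from a scribble image and a text prompt.

        Runs the flow-matching denoising loop conditioned on CLIP text embeddings and DINOv2
        image embeddings, then decodes the final latents into meshes. Returns a
        `TripoSGPipelineOutput` with `samples` (raw vertex/face arrays) and `meshes`
        (`trimesh.Trimesh` objects), or an `(output, meshes)` tuple if `return_dict=False`.
        """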
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
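
        # Determine the effective batch size from the type of the conditioning image input.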
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, list):
            batch_size = len(image)
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(
                f"`image` must be a PIL image, a list of images, or a `torch.Tensor`, got {type(image)}."
            )

        device = self._execution_device
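
        # Encode the text prompt (CLIP) and the scribble image (DINOv2). The "negative"
        # embeddings are all-zero tensors used as the unconditional branch for
        # classifier-free guidance.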
        text_embeds, negative_text_embeds = self.encode_text(
            prompt, device, num_shapes_per_prompt
        )

        image_embeds, negative_image_embeds = self.encode_image(
            image, device, num_shapes_per_prompt
        )

        if self.do_classifier_free_guidance:
            text_embeds = torch.cat([negative_text_embeds, text_embeds], dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0)
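
        # Set up the timestep schedule and the warm-up count used by the progress bar.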
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps
        )
        num_warmup_steps = max(
            len(timesteps) - num_inference_steps * self.scheduler.order, 0
        )
        self._num_timesteps = len(timesteps)
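
        # Sample the initial latent tokens (or move user-provided `latents` to the right device/dtype).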
        num_channels_latents = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_shapes_per_prompt,
            num_tokens,
            num_channels_latents,
            image_embeds.dtype,
            device,
            generator,
            latents,
        )
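
        # Denoising loop: run the DiT at each timestep, apply classifier-free guidance,
        # and advance the latents with the scheduler.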
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Duplicate the latents so the unconditional and conditional branches
                # share a single forward pass under classifier-free guidance.
                latent_model_input = (
                    torch.cat([latents] * 2)
                    if self.do_classifier_free_guidance
                    else latents
                )
                timestep = t.expand(latent_model_input.shape[0])

                noise_pred = self.transformer(
                    latent_model_input,
                    timestep.to(dtype=latents.dtype),
                    encoder_hidden_states=text_embeds,
                    encoder_hidden_states_2=image_embeds,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]

                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_image = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (
                        noise_pred_image - noise_pred_uncond
                    )

                latents_dtype = latents.dtype
                latents = self.scheduler.step(
                    noise_pred, t, latents, return_dict=False
                )[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # Some backends (e.g. Apple MPS) can change the latents dtype in
                        # `scheduler.step`; cast back to keep the dtype stable.
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)

                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
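
        # Decode the final latents into an implicit field and extract surface meshes,
        # either with the flash decoder or with dense + hierarchical octree extraction.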
        if not use_flash_decoder:
            geometric_func = lambda x: self.vae.decode(latents, sampled_points=x).sample
            output = hierarchical_extract_geometry(
                geometric_func,
                device,
                bounds=bounds,
                dense_octree_depth=dense_octree_depth,
                hierarchical_octree_depth=hierarchical_octree_depth,
            )
        else:
            self.vae.set_flash_decoder()
            output = flash_extract_geometry(
                latents,
                self.vae,
                bounds=bounds,
                octree_depth=flash_octree_depth,
            )
        meshes = [
            trimesh.Trimesh(mesh_v_f[0].astype(np.float32), mesh_v_f[1]) for mesh_v_f in output
        ]

        self.maybe_free_model_hooks()

        if not return_dict:
            return (output, meshes)

        return TripoSGPipelineOutput(samples=output, meshes=meshes)