id (int64, 0 to 190k) | prompt (string, lengths 21 to 13.4M) | docstring (string, lengths 1 to 12k, ⌀ = null)
---|---|---|
189,543 | import collections
import random
from typing import Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from datasets.utils import Rays, namedtuple_map
from torch.utils.data._utils.collate import collate, default_collate_fn_map
from nerfacc.estimators.occ_grid import OccGridEstimator
from nerfacc.estimators.prop_net import PropNetEstimator
from nerfacc.grid import ray_aabb_intersect, traverse_grids
from nerfacc.volrend import (
accumulate_along_rays_,
render_transmittance_from_density,
render_visibility_from_alpha,
render_visibility_from_density,
render_weight_from_density,
rendering,
)
Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))
def namedtuple_map(fn, tup):
"""Apply `fn` to each element of `tup` and cast to `tup`'s namedtuple."""
return type(tup)(*(None if x is None else fn(x) for x in tup))
class OccGridEstimator(AbstractEstimator):
"""Occupancy grid transmittance estimator for spatial skipping.
References: "Instant Neural Graphics Primitives."
Args:
roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping
the 3D space to the grid.
resolution: The resolution of the grid. If an integer is given, the grid is assumed to
be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.
levels: The number of levels of the grid. Default: 1.
"""
DIM: int = 3
def __init__(
self,
roi_aabb: Union[List[int], Tensor],
resolution: Union[int, List[int], Tensor] = 128,
levels: int = 1,
**kwargs,
) -> None:
super().__init__()
if "contraction_type" in kwargs:
raise ValueError(
"`contraction_type` is not supported anymore for nerfacc >= 0.4.0."
)
# check the resolution is legal
if isinstance(resolution, int):
resolution = [resolution] * self.DIM
if isinstance(resolution, (list, tuple)):
resolution = torch.tensor(resolution, dtype=torch.int32)
assert isinstance(resolution, Tensor), f"Invalid type: {resolution}!"
assert resolution.shape[0] == self.DIM, f"Invalid shape: {resolution}!"
# check the roi_aabb is legal
if isinstance(roi_aabb, (list, tuple)):
roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)
assert isinstance(roi_aabb, Tensor), f"Invalid type: {roi_aabb}!"
assert roi_aabb.shape[0] == self.DIM * 2, f"Invalid shape: {roi_aabb}!"
# multiple levels of aabbs
aabbs = torch.stack(
[_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0
)
# total number of voxels
self.cells_per_lvl = int(resolution.prod().item())
self.levels = levels
# Buffers
self.register_buffer("resolution", resolution) # [3]
self.register_buffer("aabbs", aabbs) # [n_aabbs, 6]
self.register_buffer(
"occs", torch.zeros(self.levels * self.cells_per_lvl)
)
self.register_buffer(
"binaries",
torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),
)
# Grid coords & indices
grid_coords = _meshgrid3d(resolution).reshape(
self.cells_per_lvl, self.DIM
)
self.register_buffer("grid_coords", grid_coords, persistent=False)
grid_indices = torch.arange(self.cells_per_lvl)
self.register_buffer("grid_indices", grid_indices, persistent=False)
def sampling(
self,
# rays
rays_o: Tensor, # [n_rays, 3]
rays_d: Tensor, # [n_rays, 3]
# sigma/alpha function for skipping invisible space
sigma_fn: Optional[Callable] = None,
alpha_fn: Optional[Callable] = None,
near_plane: float = 0.0,
far_plane: float = 1e10,
t_min: Optional[Tensor] = None, # [n_rays]
t_max: Optional[Tensor] = None, # [n_rays]
# rendering options
render_step_size: float = 1e-3,
early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0,
stratified: bool = False,
cone_angle: float = 0.0,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Sampling with spatial skipping.
Note:
This function is not differentiable with respect to any of its inputs.
Args:
rays_o: Ray origins of shape (n_rays, 3).
rays_d: Normalized ray directions of shape (n_rays, 3).
sigma_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the density along the ray with `sigma_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation density values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
alpha_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the density along the ray with `alpha_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation opacity values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
near_plane: Optional. Near plane distance. Default: 0.0.
far_plane: Optional. Far plane distance. Default: 1e10.
t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).
If provided, the marching will start from maximum of t_min and near_plane.
t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).
If provided, the marching will stop by minimum of t_max and far_plane.
render_step_size: Step size for marching. Default: 1e-3.
early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.
alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.
stratified: Whether to use stratified sampling. Default: False.
cone_angle: Cone angle for linearly-increased step size. 0. means
constant step size. Default: 0.0.
Returns:
A tuple of {LongTensor, Tensor, Tensor}:
- **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).
- **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).
- **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).
Examples:
.. code-block:: python
>>> ray_indices, t_starts, t_ends = grid.sampling(
>>> rays_o, rays_d, render_step_size=1e-3)
>>> t_mid = (t_starts + t_ends) / 2.0
>>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]
"""
near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)
far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)
if t_min is not None:
near_planes = torch.clamp(near_planes, min=t_min)
if t_max is not None:
far_planes = torch.clamp(far_planes, max=t_max)
if stratified:
near_planes += torch.rand_like(near_planes) * render_step_size
intervals, samples, _ = traverse_grids(
rays_o,
rays_d,
self.binaries,
self.aabbs,
near_planes=near_planes,
far_planes=far_planes,
step_size=render_step_size,
cone_angle=cone_angle,
)
t_starts = intervals.vals[intervals.is_left]
t_ends = intervals.vals[intervals.is_right]
ray_indices = samples.ray_indices
packed_info = samples.packed_info
# skip invisible space
if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (
sigma_fn is not None or alpha_fn is not None
):
alpha_thre = min(alpha_thre, self.occs.mean().item())
# Compute visibility of the samples, and filter out invisible samples
if sigma_fn is not None:
if t_starts.shape[0] != 0:
sigmas = sigma_fn(t_starts, t_ends, ray_indices)
else:
sigmas = torch.empty((0,), device=t_starts.device)
assert (
sigmas.shape == t_starts.shape
), "sigmas must have shape of (N,)! Got {}".format(sigmas.shape)
masks = render_visibility_from_density(
t_starts=t_starts,
t_ends=t_ends,
sigmas=sigmas,
packed_info=packed_info,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
elif alpha_fn is not None:
if t_starts.shape[0] != 0:
alphas = alpha_fn(t_starts, t_ends, ray_indices)
else:
alphas = torch.empty((0,), device=t_starts.device)
assert (
alphas.shape == t_starts.shape
), "alphas must have shape of (N,)! Got {}".format(alphas.shape)
masks = render_visibility_from_alpha(
alphas=alphas,
packed_info=packed_info,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
ray_indices, t_starts, t_ends = (
ray_indices[masks],
t_starts[masks],
t_ends[masks],
)
return ray_indices, t_starts, t_ends
def update_every_n_steps(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 1e-2,
ema_decay: float = 0.95,
warmup_steps: int = 256,
n: int = 16,
) -> None:
"""Update the estimator every n steps during training.
Args:
step: Current training step.
occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and
returns the occupancy values :math:`(N, 1)` at those locations.
occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.
ema_decay: The decay rate for EMA updates. Default: 0.95.
warmup_steps: Sample all cells during the warmup stage. After the warmup
stage we change the sampling strategy to 1/4 uniformly sampled cells
together with 1/4 occupied cells. Default: 256.
n: Update the grid every n steps. Default: 16.
"""
if not self.training:
raise RuntimeError(
"You should only call this function only during training. "
"Please call _update() directly if you want to update the "
"field during inference."
)
if step % n == 0 and self.training:
self._update(
step=step,
occ_eval_fn=occ_eval_fn,
occ_thre=occ_thre,
ema_decay=ema_decay,
warmup_steps=warmup_steps,
)
# adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py
def mark_invisible_cells(
self,
K: Tensor,
c2w: Tensor,
width: int,
height: int,
near_plane: float = 0.0,
chunk: int = 32**3,
) -> None:
"""Mark the cells that aren't covered by the cameras with density -1.
Should only be executed once before training starts.
Args:
K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).
c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).
width: Image width in pixels
height: Image height in pixels
near_plane: Near plane distance
chunk: The chunk size to split the cells (to avoid OOM)
"""
assert K.dim() == 3 and K.shape[1:] == (3, 3)
assert c2w.dim() == 3 and (
c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)
)
assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1
N_cams = c2w.shape[0]
w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)
w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)
lvl_indices = self._get_all_cells()
for lvl, indices in enumerate(lvl_indices):
grid_coords = self.grid_coords[indices]
for i in range(0, len(indices), chunk):
x = grid_coords[i : i + chunk] / (self.resolution - 1)
indices_chunk = indices[i : i + chunk]
# voxel coordinates [0, 1]^3 -> world
xyzs_w = (
self.aabbs[lvl, :3]
+ x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])
).T
xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)
uvd = K @ xyzs_c # (N_cams, 3, chunk)
uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)
in_image = (
(uvd[:, 2] >= 0)
& (uv[:, 0] >= 0)
& (uv[:, 0] < width)
& (uv[:, 1] >= 0)
& (uv[:, 1] < height)
)
covered_by_cam = (
uvd[:, 2] >= near_plane
) & in_image # (N_cams, chunk)
# if the cell is visible by at least one camera
count = covered_by_cam.sum(0) / N_cams
too_near_to_cam = (
uvd[:, 2] < near_plane
) & in_image # (N, chunk)
# if the cell is too close (in front) to any camera
too_near_to_any_cam = too_near_to_cam.any(0)
# a valid cell should be visible by at least one camera and not too close to any camera
valid_mask = (count > 0) & (~too_near_to_any_cam)
cell_ids_base = lvl * self.cells_per_lvl
self.occs[cell_ids_base + indices_chunk] = torch.where(
valid_mask, 0.0, -1.0
)
def _get_all_cells(self) -> List[Tensor]:
"""Returns all cells of the grid."""
lvl_indices = []
for lvl in range(self.levels):
# filter out the cells with -1 density (non-visible to any camera)
cell_ids = lvl * self.cells_per_lvl + self.grid_indices
indices = self.grid_indices[self.occs[cell_ids] >= 0.0]
lvl_indices.append(indices)
return lvl_indices
def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:
"""Samples both n uniform and occupied cells."""
lvl_indices = []
for lvl in range(self.levels):
uniform_indices = torch.randint(
self.cells_per_lvl, (n,), device=self.device
)
# filter out the cells with -1 density (non-visible to any camera)
cell_ids = lvl * self.cells_per_lvl + uniform_indices
uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]
occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]
if n < len(occupied_indices):
selector = torch.randint(
len(occupied_indices), (n,), device=self.device
)
occupied_indices = occupied_indices[selector]
indices = torch.cat([uniform_indices, occupied_indices], dim=0)
lvl_indices.append(indices)
return lvl_indices
def _update(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 0.01,
ema_decay: float = 0.95,
warmup_steps: int = 256,
) -> None:
"""Update the occ field in the EMA way."""
# sample cells
if step < warmup_steps:
lvl_indices = self._get_all_cells()
else:
N = self.cells_per_lvl // 4
lvl_indices = self._sample_uniform_and_occupied_cells(N)
for lvl, indices in enumerate(lvl_indices):
# infer occupancy: density * step_size
grid_coords = self.grid_coords[indices]
x = (
grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)
) / self.resolution
# voxel coordinates [0, 1]^3 -> world
x = self.aabbs[lvl, :3] + x * (
self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]
)
occ = occ_eval_fn(x).squeeze(-1)
# ema update
cell_ids = lvl * self.cells_per_lvl + indices
self.occs[cell_ids] = torch.maximum(
self.occs[cell_ids] * ema_decay, occ
)
# supposed to use scatter_max here, but empirically it is almost the same.
# self.occs, _ = scatter_max(
# occ, indices, dim=0, out=self.occs * ema_decay
# )
thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)
self.binaries = (self.occs > thre).view(self.binaries.shape)
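As a quick usage illustration (not part of the snippet above), a hedged construction sketch for this estimator; the aabb, resolution, and level count are arbitrary example values, and it assumes the nerfacc helpers referenced above (AbstractEstimator, _enlarge_aabb, _meshgrid3d) are available:
# Sketch: a 4-level occupancy grid. Level i covers roi_aabb scaled by 2**i,
# so the coarsest level wraps an 8x larger region around the same center.
estimator = OccGridEstimator(
    roi_aabb=[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0],
    resolution=128,
    levels=4,
)
# Registered buffers after construction:
#   estimator.binaries -> torch.Size([4, 128, 128, 128]) (bool)
#   estimator.aabbs    -> torch.Size([4, 6])
#   estimator.occs     -> torch.Size([4 * 128**3])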
def ray_aabb_intersect(
rays_o: Tensor,
rays_d: Tensor,
aabbs: Tensor,
near_plane: float = -float("inf"),
far_plane: float = float("inf"),
miss_value: float = float("inf"),
) -> Tuple[Tensor, Tensor, Tensor]:
"""Ray-AABB intersection.
Args:
rays_o: (n_rays, 3) Ray origins.
rays_d: (n_rays, 3) Normalized ray directions.
aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.
near_plane: Optional. Near plane. Default to -infinity.
far_plane: Optional. Far plane. Default to infinity.
miss_value: Optional. Value to use for tmin and tmax when there is no intersection.
Default to infinity.
Returns:
A tuple of {Tensor, Tensor, BoolTensor}:
- **t_mins**: (n_rays, m) tmin for each ray-AABB pair.
- **t_maxs**: (n_rays, m) tmax for each ray-AABB pair.
- **hits**: (n_rays, m) whether each ray-AABB pair intersects.
"""
assert rays_o.ndim == 2 and rays_o.shape[-1] == 3
assert rays_d.ndim == 2 and rays_d.shape[-1] == 3
assert aabbs.ndim == 2 and aabbs.shape[-1] == 6
t_mins, t_maxs, hits = _C.ray_aabb_intersect(
rays_o.contiguous(),
rays_d.contiguous(),
aabbs.contiguous(),
near_plane,
far_plane,
miss_value,
)
return t_mins, t_maxs, hits
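As a quick sanity check (illustrative only; this relies on the CUDA extension `_C` backing the function), one ray that hits a unit box and one that misses it:
rays_o = torch.tensor([[-2.0, 0.0, 0.0], [-2.0, 5.0, 0.0]], device="cuda")
rays_d = torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], device="cuda")
aabbs = torch.tensor([[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]], device="cuda")
t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, aabbs)
# By hand: the first ray enters the box at t=1 and exits at t=3, so
# hits ~ [[True], [False]], t_mins ~ [[1.0], [inf]], t_maxs ~ [[3.0], [inf]]
# (misses are filled with the default miss_value of infinity).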
def traverse_grids(
# rays
rays_o: Tensor, # [n_rays, 3]
rays_d: Tensor, # [n_rays, 3]
# grids
binaries: Tensor, # [m, resx, resy, resz]
aabbs: Tensor, # [m, 6]
# options
near_planes: Optional[Tensor] = None, # [n_rays]
far_planes: Optional[Tensor] = None, # [n_rays]
step_size: Optional[float] = 1e-3,
cone_angle: Optional[float] = 0.0,
traverse_steps_limit: Optional[int] = None,
over_allocate: Optional[bool] = False,
rays_mask: Optional[Tensor] = None, # [n_rays]
# pre-compute intersections
t_sorted: Optional[Tensor] = None, # [n_rays, n_grids * 2]
t_indices: Optional[Tensor] = None, # [n_rays, n_grids * 2]
hits: Optional[Tensor] = None, # [n_rays, n_grids]
) -> Tuple[RayIntervals, RaySamples, Tensor]:
"""Ray Traversal within Multiple Grids.
Note:
This function is not differentiable with respect to any of its inputs.
Args:
rays_o: (n_rays, 3) Ray origins.
rays_d: (n_rays, 3) Normalized ray directions.
binaries: (m, resx, resy, resz) Multiple binary grids with the same resolution.
aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.
near_planes: Optional. (n_rays,) Near planes for the traversal to start. Default to 0.
far_planes: Optional. (n_rays,) Far planes for the traversal to end. Default to infinity.
step_size: Optional. Step size for ray traversal. Default to 1e-3.
cone_angle: Optional. Cone angle for linearly-increased step size. 0. means
constant step size. Default: 0.0.
traverse_steps_limit: Optional. Maximum number of samples per ray.
over_allocate: Optional. Whether to over-allocate the memory for the outputs.
rays_mask: Optional. (n_rays,) Skip some rays if given.
t_sorted: Optional. (n_rays, n_grids * 2) Pre-computed sorted t values for each ray-grid pair. Default to None.
t_indices: Optional. (n_rays, n_grids * 2) Pre-computed sorted t indices for each ray-grid pair. Default to None.
hits: Optional. (n_rays, n_grids) Pre-computed hit flags for each ray-grid pair. Default to None.
Returns:
A :class:`RayIntervals` object containing the intervals of the ray traversal, and
a :class:`RaySamples` object containing the samples within each interval, and
a :class:`Tensor` of shape (n_rays,) containing the termination t value for each ray.
"""
if near_planes is None:
near_planes = torch.zeros_like(rays_o[:, 0])
if far_planes is None:
far_planes = torch.full_like(rays_o[:, 0], float("inf"))
if rays_mask is None:
rays_mask = torch.ones_like(rays_o[:, 0], dtype=torch.bool)
if traverse_steps_limit is None:
traverse_steps_limit = -1
if over_allocate:
assert (
traverse_steps_limit > 0
), "traverse_steps_limit must be set if over_allocate is True."
if t_sorted is None or t_indices is None or hits is None:
# Compute ray aabb intersection for all levels of grid. [n_rays, m]
t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, aabbs)
# Sort the t values for each ray. [n_rays, m]
t_sorted, t_indices = torch.sort(
torch.cat([t_mins, t_maxs], dim=-1), dim=-1
)
# Traverse the grids.
intervals, samples, termination_planes = _C.traverse_grids(
# rays
rays_o.contiguous(), # [n_rays, 3]
rays_d.contiguous(), # [n_rays, 3]
rays_mask.contiguous(), # [n_rays]
# grids
binaries.contiguous(), # [m, resx, resy, resz]
aabbs.contiguous(), # [m, 6]
# intersections
t_sorted.contiguous(), # [n_rays, m * 2]
t_indices.contiguous(), # [n_rays, m * 2]
hits.contiguous(), # [n_rays, m]
# options
near_planes.contiguous(), # [n_rays]
far_planes.contiguous(), # [n_rays]
step_size,
cone_angle,
True,
True,
True,
traverse_steps_limit,
over_allocate,
)
return (
RayIntervals._from_cpp(intervals),
RaySamples._from_cpp(samples),
termination_planes,
)
def render_weight_from_density(
t_starts: Tensor,
t_ends: Tensor,
sigmas: Tensor,
packed_info: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
prefix_trans: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Compute rendering weights :math:`w_i` from density :math:`\\sigma_i` and interval :math:`\\delta_i`.
.. math::
w_i = T_i(1 - \\exp(-\\sigma_i\\delta_i)), \\quad\\textrm{where}\\quad T_i = \\exp(-\\sum_{j=1}^{i-1}\\sigma_j\\delta_j)
This function supports both batched and flattened input tensors. For flattened inputs, either
(`packed_info`) or (`ray_indices` and `n_rays`) should be provided.
Args:
t_starts: The start time of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
t_ends: The end time of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened samples, with in total n_rays chunks.
Useful for flattened input.
ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).
n_rays: Number of rays. Only useful when `ray_indices` is provided.
prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).
Returns:
The rendering weights, transmittance and opacities, all with the same shape as `sigmas`.
Examples:
.. code-block:: python
>>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda")
>>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda")
>>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
>>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")
>>> weights, transmittance, alphas = render_weight_from_density(
>>> t_starts, t_ends, sigmas, ray_indices=ray_indices)
weights: [0.33, 0.37, 0.03, 0.55, 0.04, 0.00, 0.59]
transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]
alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]
"""
trans, alphas = render_transmittance_from_density(
t_starts, t_ends, sigmas, packed_info, ray_indices, n_rays, prefix_trans
)
weights = trans * alphas
return weights, trans, alphas
def accumulate_along_rays_(
weights: Tensor,
values: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
outputs: Optional[Tensor] = None,
) -> None:
"""Accumulate volumetric values along the ray.
Inplace version of :func:`accumulate_along_rays`.
"""
if values is None:
src = weights[..., None]
else:
assert values.dim() == weights.dim() + 1
assert weights.shape == values.shape[:-1]
src = weights[..., None] * values
if ray_indices is not None:
assert weights.dim() == 1, "weights must be flattened"
assert (
outputs.dim() == 2 and outputs.shape[-1] == src.shape[-1]
), "outputs must be of shape (n_rays, D)"
outputs.index_add_(0, ray_indices, src)
else:
outputs.add_(src.sum(dim=-2))
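For illustration, a minimal sketch composing `render_weight_from_density` and `accumulate_along_rays_` (reusing the flattened-sample example from the docstring above; the per-sample colors here are random placeholders):
n_rays = 3
rgb = torch.zeros(n_rays, 3, device="cuda")    # per-ray color buffer
acc = torch.zeros(n_rays, 1, device="cuda")    # per-ray opacity buffer
sample_rgbs = torch.rand(7, 3, device="cuda")  # one color per sample
# weights / ray_indices come from the render_weight_from_density example above.
accumulate_along_rays_(weights, values=sample_rgbs, ray_indices=ray_indices, outputs=rgb)
accumulate_along_rays_(weights, values=None, ray_indices=ray_indices, outputs=acc)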
The provided code snippet includes the necessary dependencies for implementing the `render_image_with_occgrid_test` function. Write a Python function `def render_image_with_occgrid_test(max_samples: int, radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, early_stop_eps: float = 1e-4, timestamps: Optional[torch.Tensor] = None)` to solve the following problem:
Render the pixels of an image.
Here is the function:
def render_image_with_occgrid_test(
max_samples: int,
# scene
radiance_field: torch.nn.Module,
estimator: OccGridEstimator,
rays: Rays,
# rendering options
near_plane: float = 0.0,
far_plane: float = 1e10,
render_step_size: float = 1e-3,
render_bkgd: Optional[torch.Tensor] = None,
cone_angle: float = 0.0,
alpha_thre: float = 0.0,
early_stop_eps: float = 1e-4,
# only useful for dnerf
timestamps: Optional[torch.Tensor] = None,
):
"""Render the pixels of an image."""
rays_shape = rays.origins.shape
if len(rays_shape) == 3:
height, width, _ = rays_shape
num_rays = height * width
rays = namedtuple_map(
lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays
)
else:
num_rays, _ = rays_shape
def rgb_sigma_fn(t_starts, t_ends, ray_indices):
t_origins = rays.origins[ray_indices]
t_dirs = rays.viewdirs[ray_indices]
positions = (
t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0
)
if timestamps is not None:
# dnerf
t = (
timestamps[ray_indices]
if radiance_field.training
else timestamps.expand_as(positions[:, :1])
)
rgbs, sigmas = radiance_field(positions, t, t_dirs)
else:
rgbs, sigmas = radiance_field(positions, t_dirs)
return rgbs, sigmas.squeeze(-1)
device = rays.origins.device
opacity = torch.zeros(num_rays, 1, device=device)
depth = torch.zeros(num_rays, 1, device=device)
rgb = torch.zeros(num_rays, 3, device=device)
ray_mask = torch.ones(num_rays, device=device).bool()
# 1 for synthetic scenes, 4 for real scenes
min_samples = 1 if cone_angle == 0 else 4
iter_samples = total_samples = 0
rays_o = rays.origins
rays_d = rays.viewdirs
near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)
far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)
t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs)
n_grids = estimator.binaries.size(0)
if n_grids > 1:
t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1)
else:
t_sorted = torch.cat([t_mins, t_maxs], -1)
t_indices = torch.arange(
0, n_grids * 2, device=t_mins.device, dtype=torch.int64
).expand(num_rays, n_grids * 2)
opc_thre = 1 - early_stop_eps
while iter_samples < max_samples:
n_alive = ray_mask.sum().item()
if n_alive == 0:
break
# the number of samples to add on each ray
n_samples = max(min(num_rays // n_alive, 64), min_samples)
iter_samples += n_samples
# ray marching
(intervals, samples, termination_planes) = traverse_grids(
# rays
rays_o, # [n_rays, 3]
rays_d, # [n_rays, 3]
# grids
estimator.binaries, # [m, resx, resy, resz]
estimator.aabbs, # [m, 6]
# options
near_planes, # [n_rays]
far_planes, # [n_rays]
render_step_size,
cone_angle,
n_samples,
True,
ray_mask,
# pre-compute intersections
t_sorted, # [n_rays, m*2]
t_indices, # [n_rays, m*2]
hits, # [n_rays, m]
)
t_starts = intervals.vals[intervals.is_left]
t_ends = intervals.vals[intervals.is_right]
ray_indices = samples.ray_indices[samples.is_valid]
packed_info = samples.packed_info
# get rgb and sigma from radiance field
rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)
# volume rendering using native cuda scan
weights, _, alphas = render_weight_from_density(
t_starts,
t_ends,
sigmas,
ray_indices=ray_indices,
n_rays=num_rays,
prefix_trans=1 - opacity[ray_indices].squeeze(-1),
)
if alpha_thre > 0:
vis_mask = alphas >= alpha_thre
ray_indices, rgbs, weights, t_starts, t_ends = (
ray_indices[vis_mask],
rgbs[vis_mask],
weights[vis_mask],
t_starts[vis_mask],
t_ends[vis_mask],
)
accumulate_along_rays_(
weights,
values=rgbs,
ray_indices=ray_indices,
outputs=rgb,
)
accumulate_along_rays_(
weights,
values=None,
ray_indices=ray_indices,
outputs=opacity,
)
accumulate_along_rays_(
weights,
values=(t_starts + t_ends)[..., None] / 2.0,
ray_indices=ray_indices,
outputs=depth,
)
# update near_planes using termination planes
near_planes = termination_planes
# update rays status
ray_mask = torch.logical_and(
# early stopping
opacity.view(-1) <= opc_thre,
# remove rays that have reached the far plane
packed_info[:, 1] == n_samples,
)
total_samples += ray_indices.shape[0]
rgb = rgb + render_bkgd * (1.0 - opacity)
depth = depth / opacity.clamp_min(torch.finfo(rgbs.dtype).eps)
return (
rgb.view((*rays_shape[:-1], -1)),
opacity.view((*rays_shape[:-1], -1)),
depth.view((*rays_shape[:-1], -1)),
total_samples,
) | Render the pixels of an image. |
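A hedged evaluation-loop sketch of how this function might be called (the `test_dataset` object, device placement, and the 1024-sample budget are assumptions for illustration, not part of the record above):
radiance_field.eval()
estimator.eval()
with torch.no_grad():
    data = test_dataset[0]  # assumed to yield "rays", "pixels", "color_bkgd"
    rgb, opacity, depth, n_samples = render_image_with_occgrid_test(
        1024,  # max_samples per ray across all marching iterations
        radiance_field,
        estimator,
        data["rays"],
        render_step_size=5e-3,
        render_bkgd=data["color_bkgd"],
    )
    mse = torch.nn.functional.mse_loss(rgb, data["pixels"])
    psnr = -10.0 * torch.log10(mse)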
189,544 | from typing import Callable, List, Union
import numpy as np
import torch
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
def contract_to_unisphere(
x: torch.Tensor,
aabb: torch.Tensor,
ord: Union[str, int] = 2,
# ord: Union[float, int] = float("inf"),
eps: float = 1e-6,
derivative: bool = False,
):
aabb_min, aabb_max = torch.split(aabb, 3, dim=-1)
x = (x - aabb_min) / (aabb_max - aabb_min)
x = x * 2 - 1 # aabb is at [-1, 1]
mag = torch.linalg.norm(x, ord=ord, dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
if derivative:
dev = (2 * mag - 1) / mag**2 + 2 * x**2 * (
1 / mag**3 - (2 * mag - 1) / mag**4
)
dev[~mask] = 1.0
dev = torch.clamp(dev, min=eps)
return dev
else:
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
return x | null |
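A small numerical check of this contraction (illustrative only): points inside the [-1, 1] box map linearly into the inner half of the unit cube, while far-away points are squashed toward its boundary.
aabb = torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0])
pts = torch.tensor(
    [
        [0.0, 0.0, 0.0],    # box center      -> (0.5, 0.5, 0.5)
        [1.0, 0.0, 0.0],    # on the box face -> (0.75, 0.5, 0.5)
        [100.0, 0.0, 0.0],  # far outside     -> (~0.9975, 0.5, 0.5)
    ]
)
out = contract_to_unisphere(pts, aabb)
assert (out >= 0).all() and (out <= 1).all()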
189,545 | import argparse
import pathlib
import time
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from datasets.nerf_synthetic import SubjectLoader
from lpips import LPIPS
from radiance_fields.mlp import VanillaNeRFRadianceField
from examples.utils import (
NERF_SYNTHETIC_SCENES,
render_image_with_occgrid,
set_random_seed,
)
from nerfacc.estimators.occ_grid import OccGridEstimator
render_step_size = 5e-3
radiance_field = VanillaNeRFRadianceField().to(device)
def occ_eval_fn(x):
density = radiance_field.query_density(x)
return density * render_step_size | null |
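A hedged sketch of how `occ_eval_fn` is typically hooked into the estimator at every training step (the aabb, `device`, `max_steps`, and the omitted ray-sampling / optimization code are placeholders, not part of the record above):
estimator = OccGridEstimator(
    roi_aabb=[-1.5, -1.5, -1.5, 1.5, 1.5, 1.5], resolution=128, levels=1
).to(device)
for step in range(max_steps):
    radiance_field.train()
    estimator.train()
    # refresh the binary occupancy every 16 steps from the current density field
    estimator.update_every_n_steps(step=step, occ_eval_fn=occ_eval_fn, occ_thre=1e-2)
    # ... sample rays with estimator.sampling(...), render, and optimize ...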
189,546 | import argparse
import math
import pathlib
import time
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from lpips import LPIPS
from radiance_fields.ngp import NGPRadianceField
from examples.utils import (
MIPNERF360_UNBOUNDED_SCENES,
NERF_SYNTHETIC_SCENES,
render_image_with_occgrid,
render_image_with_occgrid_test,
set_random_seed,
)
from nerfacc.estimators.occ_grid import OccGridEstimator
class NGPRadianceField(torch.nn.Module):
"""Instance-NGP Radiance Field"""
def __init__(
self,
aabb: Union[torch.Tensor, List[float]],
num_dim: int = 3,
use_viewdirs: bool = True,
density_activation: Callable = lambda x: trunc_exp(x - 1),
unbounded: bool = False,
base_resolution: int = 16,
max_resolution: int = 4096,
geo_feat_dim: int = 15,
n_levels: int = 16,
log2_hashmap_size: int = 19,
) -> None:
super().__init__()
if not isinstance(aabb, torch.Tensor):
aabb = torch.tensor(aabb, dtype=torch.float32)
# It turns out a rectangular aabb leads to uneven collisions and thus bad performance.
# We enforce a cube aabb here.
center = (aabb[..., :num_dim] + aabb[..., num_dim:]) / 2.0
size = (aabb[..., num_dim:] - aabb[..., :num_dim]).max()
aabb = torch.cat([center - size / 2.0, center + size / 2.0], dim=-1)
self.register_buffer("aabb", aabb)
self.num_dim = num_dim
self.use_viewdirs = use_viewdirs
self.density_activation = density_activation
self.unbounded = unbounded
self.base_resolution = base_resolution
self.max_resolution = max_resolution
self.geo_feat_dim = geo_feat_dim
self.n_levels = n_levels
self.log2_hashmap_size = log2_hashmap_size
per_level_scale = np.exp(
(np.log(max_resolution) - np.log(base_resolution)) / (n_levels - 1)
).tolist()
if self.use_viewdirs:
self.direction_encoding = tcnn.Encoding(
n_input_dims=num_dim,
encoding_config={
"otype": "Composite",
"nested": [
{
"n_dims_to_encode": 3,
"otype": "SphericalHarmonics",
"degree": 4,
},
# {"otype": "Identity", "n_bins": 4, "degree": 4},
],
},
)
self.mlp_base = tcnn.NetworkWithInputEncoding(
n_input_dims=num_dim,
n_output_dims=1 + self.geo_feat_dim,
encoding_config={
"otype": "HashGrid",
"n_levels": n_levels,
"n_features_per_level": 2,
"log2_hashmap_size": log2_hashmap_size,
"base_resolution": base_resolution,
"per_level_scale": per_level_scale,
},
network_config={
"otype": "FullyFusedMLP",
"activation": "ReLU",
"output_activation": "None",
"n_neurons": 64,
"n_hidden_layers": 1,
},
)
if self.geo_feat_dim > 0:
self.mlp_head = tcnn.Network(
n_input_dims=(
(
self.direction_encoding.n_output_dims
if self.use_viewdirs
else 0
)
+ self.geo_feat_dim
),
n_output_dims=3,
network_config={
"otype": "FullyFusedMLP",
"activation": "ReLU",
"output_activation": "None",
"n_neurons": 64,
"n_hidden_layers": 2,
},
)
def query_density(self, x, return_feat: bool = False):
if self.unbounded:
x = contract_to_unisphere(x, self.aabb)
else:
aabb_min, aabb_max = torch.split(self.aabb, self.num_dim, dim=-1)
x = (x - aabb_min) / (aabb_max - aabb_min)
selector = ((x > 0.0) & (x < 1.0)).all(dim=-1)
x = (
self.mlp_base(x.view(-1, self.num_dim))
.view(list(x.shape[:-1]) + [1 + self.geo_feat_dim])
.to(x)
)
density_before_activation, base_mlp_out = torch.split(
x, [1, self.geo_feat_dim], dim=-1
)
density = (
self.density_activation(density_before_activation)
* selector[..., None]
)
if return_feat:
return density, base_mlp_out
else:
return density
def _query_rgb(self, dir, embedding, apply_act: bool = True):
# tcnn requires directions in the range [0, 1]
if self.use_viewdirs:
dir = (dir + 1.0) / 2.0
d = self.direction_encoding(dir.reshape(-1, dir.shape[-1]))
h = torch.cat([d, embedding.reshape(-1, self.geo_feat_dim)], dim=-1)
else:
h = embedding.reshape(-1, self.geo_feat_dim)
rgb = (
self.mlp_head(h)
.reshape(list(embedding.shape[:-1]) + [3])
.to(embedding)
)
if apply_act:
rgb = torch.sigmoid(rgb)
return rgb
def forward(
self,
positions: torch.Tensor,
directions: torch.Tensor = None,
):
if self.use_viewdirs and (directions is not None):
assert (
positions.shape == directions.shape
), f"{positions.shape} v.s. {directions.shape}"
density, embedding = self.query_density(positions, return_feat=True)
rgb = self._query_rgb(directions, embedding=embedding)
return rgb, density # type: ignore
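For reference, a short numeric sketch of the per-level growth factor computed in __init__ above (using the class defaults base_resolution=16, max_resolution=4096, n_levels=16):
import numpy as np
base_resolution, max_resolution, n_levels = 16, 4096, 16
b = np.exp((np.log(max_resolution) - np.log(base_resolution)) / (n_levels - 1))
# b is about 1.447: each hash-grid level is ~1.45x finer than the previous one,
# so the coarsest level has resolution 16 and the finest reaches 4096.
resolutions = [base_resolution * b**level for level in range(n_levels)]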
MIPNERF360_UNBOUNDED_SCENES = [
"garden",
"bicycle",
"bonsai",
"counter",
"kitchen",
"room",
"stump",
]
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def render_image_with_occgrid(
# scene
radiance_field: torch.nn.Module,
estimator: OccGridEstimator,
rays: Rays,
# rendering options
near_plane: float = 0.0,
far_plane: float = 1e10,
render_step_size: float = 1e-3,
render_bkgd: Optional[torch.Tensor] = None,
cone_angle: float = 0.0,
alpha_thre: float = 0.0,
# test options
test_chunk_size: int = 8192,
# only useful for dnerf
timestamps: Optional[torch.Tensor] = None,
):
"""Render the pixels of an image."""
rays_shape = rays.origins.shape
if len(rays_shape) == 3:
height, width, _ = rays_shape
num_rays = height * width
rays = namedtuple_map(
lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays
)
else:
num_rays, _ = rays_shape
results = []
chunk = (
torch.iinfo(torch.int32).max
if radiance_field.training
else test_chunk_size
)
for i in range(0, num_rays, chunk):
chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)
rays_o = chunk_rays.origins
rays_d = chunk_rays.viewdirs
def sigma_fn(t_starts, t_ends, ray_indices):
if t_starts.shape[0] == 0:
sigmas = torch.empty((0, 1), device=t_starts.device)
else:
t_origins = rays_o[ray_indices]
t_dirs = rays_d[ray_indices]
positions = (
t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0
)
if timestamps is not None:
# dnerf
t = (
timestamps[ray_indices]
if radiance_field.training
else timestamps.expand_as(positions[:, :1])
)
sigmas = radiance_field.query_density(positions, t)
else:
sigmas = radiance_field.query_density(positions)
return sigmas.squeeze(-1)
def rgb_sigma_fn(t_starts, t_ends, ray_indices):
if t_starts.shape[0] == 0:
rgbs = torch.empty((0, 3), device=t_starts.device)
sigmas = torch.empty((0, 1), device=t_starts.device)
else:
t_origins = rays_o[ray_indices]
t_dirs = rays_d[ray_indices]
positions = (
t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0
)
if timestamps is not None:
# dnerf
t = (
timestamps[ray_indices]
if radiance_field.training
else timestamps.expand_as(positions[:, :1])
)
rgbs, sigmas = radiance_field(positions, t, t_dirs)
else:
rgbs, sigmas = radiance_field(positions, t_dirs)
return rgbs, sigmas.squeeze(-1)
ray_indices, t_starts, t_ends = estimator.sampling(
rays_o,
rays_d,
sigma_fn=sigma_fn,
near_plane=near_plane,
far_plane=far_plane,
render_step_size=render_step_size,
stratified=radiance_field.training,
cone_angle=cone_angle,
alpha_thre=alpha_thre,
)
rgb, opacity, depth, extras = rendering(
t_starts,
t_ends,
ray_indices,
n_rays=rays_o.shape[0],
rgb_sigma_fn=rgb_sigma_fn,
render_bkgd=render_bkgd,
)
chunk_results = [rgb, opacity, depth, len(t_starts)]
results.append(chunk_results)
colors, opacities, depths, n_rendering_samples = [
torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r
for r in zip(*results)
]
return (
colors.view((*rays_shape[:-1], -1)),
opacities.view((*rays_shape[:-1], -1)),
depths.view((*rays_shape[:-1], -1)),
sum(n_rendering_samples),
)
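A hedged single-training-step sketch around this function (the `train_dataset` batch and `optimizer` are illustrative assumptions, not part of the record above):
radiance_field.train()
estimator.train()
data = train_dataset[0]  # assumed to yield "rays", "pixels", "color_bkgd"
rgb, acc, depth, n_rendering_samples = render_image_with_occgrid(
    radiance_field,
    estimator,
    data["rays"],
    near_plane=0.0,
    render_step_size=5e-3,
    render_bkgd=data["color_bkgd"],
)
loss = torch.nn.functional.smooth_l1_loss(rgb, data["pixels"])
optimizer.zero_grad()
loss.backward()
optimizer.step()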
class OccGridEstimator(AbstractEstimator):
"""Occupancy grid transmittance estimator for spatial skipping.
References: "Instant Neural Graphics Primitives."
Args:
roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping
the 3D space to the grid.
resolution: The resolution of the grid. If an integer is given, the grid is assumed to
be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.
levels: The number of levels of the grid. Default: 1.
"""
DIM: int = 3
def __init__(
self,
roi_aabb: Union[List[int], Tensor],
resolution: Union[int, List[int], Tensor] = 128,
levels: int = 1,
**kwargs,
) -> None:
super().__init__()
if "contraction_type" in kwargs:
raise ValueError(
"`contraction_type` is not supported anymore for nerfacc >= 0.4.0."
)
# check the resolution is legal
if isinstance(resolution, int):
resolution = [resolution] * self.DIM
if isinstance(resolution, (list, tuple)):
resolution = torch.tensor(resolution, dtype=torch.int32)
assert isinstance(resolution, Tensor), f"Invalid type: {resolution}!"
assert resolution.shape[0] == self.DIM, f"Invalid shape: {resolution}!"
# check the roi_aabb is legal
if isinstance(roi_aabb, (list, tuple)):
roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)
assert isinstance(roi_aabb, Tensor), f"Invalid type: {roi_aabb}!"
assert roi_aabb.shape[0] == self.DIM * 2, f"Invalid shape: {roi_aabb}!"
# multiple levels of aabbs
aabbs = torch.stack(
[_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0
)
# total number of voxels
self.cells_per_lvl = int(resolution.prod().item())
self.levels = levels
# Buffers
self.register_buffer("resolution", resolution) # [3]
self.register_buffer("aabbs", aabbs) # [n_aabbs, 6]
self.register_buffer(
"occs", torch.zeros(self.levels * self.cells_per_lvl)
)
self.register_buffer(
"binaries",
torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),
)
# Grid coords & indices
grid_coords = _meshgrid3d(resolution).reshape(
self.cells_per_lvl, self.DIM
)
self.register_buffer("grid_coords", grid_coords, persistent=False)
grid_indices = torch.arange(self.cells_per_lvl)
self.register_buffer("grid_indices", grid_indices, persistent=False)
def sampling(
self,
# rays
rays_o: Tensor, # [n_rays, 3]
rays_d: Tensor, # [n_rays, 3]
# sigma/alpha function for skipping invisible space
sigma_fn: Optional[Callable] = None,
alpha_fn: Optional[Callable] = None,
near_plane: float = 0.0,
far_plane: float = 1e10,
t_min: Optional[Tensor] = None, # [n_rays]
t_max: Optional[Tensor] = None, # [n_rays]
# rendering options
render_step_size: float = 1e-3,
early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0,
stratified: bool = False,
cone_angle: float = 0.0,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Sampling with spatial skipping.
Note:
This function is not differentiable with respect to any of its inputs.
Args:
rays_o: Ray origins of shape (n_rays, 3).
rays_d: Normalized ray directions of shape (n_rays, 3).
sigma_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the density along the ray with `sigma_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation density values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
alpha_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the density along the ray with `alpha_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation opacity values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
near_plane: Optional. Near plane distance. Default: 0.0.
far_plane: Optional. Far plane distance. Default: 1e10.
t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).
If provided, the marching will start from maximum of t_min and near_plane.
t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).
If provided, the marching will stop by minimum of t_max and far_plane.
render_step_size: Step size for marching. Default: 1e-3.
early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.
alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.
stratified: Whether to use stratified sampling. Default: False.
cone_angle: Cone angle for linearly-increased step size. 0. means
constant step size. Default: 0.0.
Returns:
A tuple of {LongTensor, Tensor, Tensor}:
- **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).
- **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).
- **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).
Examples:
.. code-block:: python
>>> ray_indices, t_starts, t_ends = grid.sampling(
>>> rays_o, rays_d, render_step_size=1e-3)
>>> t_mid = (t_starts + t_ends) / 2.0
>>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]
"""
near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)
far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)
if t_min is not None:
near_planes = torch.clamp(near_planes, min=t_min)
if t_max is not None:
far_planes = torch.clamp(far_planes, max=t_max)
if stratified:
near_planes += torch.rand_like(near_planes) * render_step_size
intervals, samples, _ = traverse_grids(
rays_o,
rays_d,
self.binaries,
self.aabbs,
near_planes=near_planes,
far_planes=far_planes,
step_size=render_step_size,
cone_angle=cone_angle,
)
t_starts = intervals.vals[intervals.is_left]
t_ends = intervals.vals[intervals.is_right]
ray_indices = samples.ray_indices
packed_info = samples.packed_info
# skip invisible space
if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (
sigma_fn is not None or alpha_fn is not None
):
alpha_thre = min(alpha_thre, self.occs.mean().item())
# Compute visibility of the samples, and filter out invisible samples
if sigma_fn is not None:
if t_starts.shape[0] != 0:
sigmas = sigma_fn(t_starts, t_ends, ray_indices)
else:
sigmas = torch.empty((0,), device=t_starts.device)
assert (
sigmas.shape == t_starts.shape
), "sigmas must have shape of (N,)! Got {}".format(sigmas.shape)
masks = render_visibility_from_density(
t_starts=t_starts,
t_ends=t_ends,
sigmas=sigmas,
packed_info=packed_info,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
elif alpha_fn is not None:
if t_starts.shape[0] != 0:
alphas = alpha_fn(t_starts, t_ends, ray_indices)
else:
alphas = torch.empty((0,), device=t_starts.device)
assert (
alphas.shape == t_starts.shape
), "alphas must have shape of (N,)! Got {}".format(alphas.shape)
masks = render_visibility_from_alpha(
alphas=alphas,
packed_info=packed_info,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
ray_indices, t_starts, t_ends = (
ray_indices[masks],
t_starts[masks],
t_ends[masks],
)
return ray_indices, t_starts, t_ends
def update_every_n_steps(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 1e-2,
ema_decay: float = 0.95,
warmup_steps: int = 256,
n: int = 16,
) -> None:
"""Update the estimator every n steps during training.
Args:
step: Current training step.
occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and
returns the occupancy values :math:`(N, 1)` at those locations.
occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.
ema_decay: The decay rate for EMA updates. Default: 0.95.
warmup_steps: Sample all cells during the warmup stage. After the warmup
stage we change the sampling strategy to 1/4 uniformly sampled cells
together with 1/4 occupied cells. Default: 256.
n: Update the grid every n steps. Default: 16.
"""
if not self.training:
raise RuntimeError(
"You should only call this function only during training. "
"Please call _update() directly if you want to update the "
"field during inference."
)
if step % n == 0 and self.training:
self._update(
step=step,
occ_eval_fn=occ_eval_fn,
occ_thre=occ_thre,
ema_decay=ema_decay,
warmup_steps=warmup_steps,
)
# adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py
def mark_invisible_cells(
self,
K: Tensor,
c2w: Tensor,
width: int,
height: int,
near_plane: float = 0.0,
chunk: int = 32**3,
) -> None:
"""Mark the cells that aren't covered by the cameras with density -1.
Should only be executed once before training starts.
Args:
K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).
c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).
width: Image width in pixels
height: Image height in pixels
near_plane: Near plane distance
chunk: The chunk size to split the cells (to avoid OOM)
"""
assert K.dim() == 3 and K.shape[1:] == (3, 3)
assert c2w.dim() == 3 and (
c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)
)
assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1
N_cams = c2w.shape[0]
w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)
w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)
lvl_indices = self._get_all_cells()
for lvl, indices in enumerate(lvl_indices):
grid_coords = self.grid_coords[indices]
for i in range(0, len(indices), chunk):
x = grid_coords[i : i + chunk] / (self.resolution - 1)
indices_chunk = indices[i : i + chunk]
# voxel coordinates [0, 1]^3 -> world
xyzs_w = (
self.aabbs[lvl, :3]
+ x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])
).T
xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)
uvd = K @ xyzs_c # (N_cams, 3, chunk)
uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)
in_image = (
(uvd[:, 2] >= 0)
& (uv[:, 0] >= 0)
& (uv[:, 0] < width)
& (uv[:, 1] >= 0)
& (uv[:, 1] < height)
)
covered_by_cam = (
uvd[:, 2] >= near_plane
) & in_image # (N_cams, chunk)
# if the cell is visible by at least one camera
count = covered_by_cam.sum(0) / N_cams
too_near_to_cam = (
uvd[:, 2] < near_plane
) & in_image # (N, chunk)
# if the cell is too close (in front) to any camera
too_near_to_any_cam = too_near_to_cam.any(0)
# a valid cell should be visible by at least one camera and not too close to any camera
valid_mask = (count > 0) & (~too_near_to_any_cam)
cell_ids_base = lvl * self.cells_per_lvl
self.occs[cell_ids_base + indices_chunk] = torch.where(
valid_mask, 0.0, -1.0
)
def _get_all_cells(self) -> List[Tensor]:
"""Returns all cells of the grid."""
lvl_indices = []
for lvl in range(self.levels):
# filter out the cells with -1 density (non-visible to any camera)
cell_ids = lvl * self.cells_per_lvl + self.grid_indices
indices = self.grid_indices[self.occs[cell_ids] >= 0.0]
lvl_indices.append(indices)
return lvl_indices
def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:
"""Samples both n uniform and occupied cells."""
lvl_indices = []
for lvl in range(self.levels):
uniform_indices = torch.randint(
self.cells_per_lvl, (n,), device=self.device
)
# filter out the cells with -1 density (non-visible to any camera)
cell_ids = lvl * self.cells_per_lvl + uniform_indices
uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]
occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]
if n < len(occupied_indices):
selector = torch.randint(
len(occupied_indices), (n,), device=self.device
)
occupied_indices = occupied_indices[selector]
indices = torch.cat([uniform_indices, occupied_indices], dim=0)
lvl_indices.append(indices)
return lvl_indices
def _update(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 0.01,
ema_decay: float = 0.95,
warmup_steps: int = 256,
) -> None:
"""Update the occ field in the EMA way."""
# sample cells
if step < warmup_steps:
lvl_indices = self._get_all_cells()
else:
N = self.cells_per_lvl // 4
lvl_indices = self._sample_uniform_and_occupied_cells(N)
for lvl, indices in enumerate(lvl_indices):
# infer occupancy: density * step_size
grid_coords = self.grid_coords[indices]
x = (
grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)
) / self.resolution
# voxel coordinates [0, 1]^3 -> world
x = self.aabbs[lvl, :3] + x * (
self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]
)
occ = occ_eval_fn(x).squeeze(-1)
# ema update
cell_ids = lvl * self.cells_per_lvl + indices
self.occs[cell_ids] = torch.maximum(
self.occs[cell_ids] * ema_decay, occ
)
# supposed to use scatter_max here, but empirically it is almost the same.
# self.occs, _ = scatter_max(
# occ, indices, dim=0, out=self.occs * ema_decay
# )
thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)
self.binaries = (self.occs > thre).view(self.binaries.shape)
class SubjectLoader(torch.utils.data.Dataset):
"""Single subject data loader for training and evaluation."""
SPLITS = ["train", "test"]
SUBJECT_IDS = [
"garden",
"bicycle",
"bonsai",
"counter",
"kitchen",
"room",
"stump",
]
OPENGL_CAMERA = False
def __init__(
self,
subject_id: str,
root_fp: str,
split: str,
color_bkgd_aug: str = "white",
num_rays: int = None,
near: float = None,
far: float = None,
batch_over_images: bool = True,
factor: int = 1,
device: str = "cpu",
):
super().__init__()
assert split in self.SPLITS, "%s" % split
assert subject_id in self.SUBJECT_IDS, "%s" % subject_id
assert color_bkgd_aug in ["white", "black", "random"]
self.split = split
self.num_rays = num_rays
self.near = near
self.far = far
self.training = (num_rays is not None) and (
split in ["train", "trainval"]
)
self.color_bkgd_aug = color_bkgd_aug
self.batch_over_images = batch_over_images
self.images, self.camtoworlds, self.K, split_indices = _load_colmap(
root_fp, subject_id, factor
)
# normalize the scene
T, sscale = similarity_from_cameras(
self.camtoworlds, strict_scaling=False
)
self.camtoworlds = np.einsum("nij, ki -> nkj", self.camtoworlds, T)
self.camtoworlds[:, :3, 3] *= sscale
# split
indices = split_indices[split]
self.images = self.images[indices]
self.camtoworlds = self.camtoworlds[indices]
# to tensor
self.images = torch.from_numpy(self.images).to(torch.uint8).to(device)
self.camtoworlds = (
torch.from_numpy(self.camtoworlds).to(torch.float32).to(device)
)
self.K = torch.tensor(self.K).to(torch.float32).to(device)
self.height, self.width = self.images.shape[1:3]
self.g = torch.Generator(device=device)
self.g.manual_seed(42)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
data = self.fetch_data(index)
data = self.preprocess(data)
return data
def preprocess(self, data):
"""Process the fetched / cached data with randomness."""
pixels, rays = data["rgb"], data["rays"]
if self.training:
if self.color_bkgd_aug == "random":
color_bkgd = torch.rand(
3, device=self.images.device, generator=self.g
)
elif self.color_bkgd_aug == "white":
color_bkgd = torch.ones(3, device=self.images.device)
elif self.color_bkgd_aug == "black":
color_bkgd = torch.zeros(3, device=self.images.device)
else:
# just use white during inference
color_bkgd = torch.ones(3, device=self.images.device)
return {
"pixels": pixels, # [n_rays, 3] or [h, w, 3]
"rays": rays, # [n_rays,] or [h, w]
"color_bkgd": color_bkgd, # [3,]
**{k: v for k, v in data.items() if k not in ["rgb", "rays"]},
}
def update_num_rays(self, num_rays):
self.num_rays = num_rays
def fetch_data(self, index):
"""Fetch the data (it maybe cached for multiple batches)."""
num_rays = self.num_rays
if self.training:
if self.batch_over_images:
image_id = torch.randint(
0,
len(self.images),
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
else:
image_id = [index] * num_rays
x = torch.randint(
0,
self.width,
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
y = torch.randint(
0,
self.height,
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
else:
image_id = [index]
x, y = torch.meshgrid(
torch.arange(self.width, device=self.images.device),
torch.arange(self.height, device=self.images.device),
indexing="xy",
)
x = x.flatten()
y = y.flatten()
# generate rays
rgb = self.images[image_id, y, x] / 255.0 # (num_rays, 3)
c2w = self.camtoworlds[image_id] # (num_rays, 3, 4)
camera_dirs = F.pad(
torch.stack(
[
(x - self.K[0, 2] + 0.5) / self.K[0, 0],
(y - self.K[1, 2] + 0.5)
/ self.K[1, 1]
* (-1.0 if self.OPENGL_CAMERA else 1.0),
],
dim=-1,
),
(0, 1),
value=(-1.0 if self.OPENGL_CAMERA else 1.0),
) # [num_rays, 3]
# [num_rays, 3]
directions = (camera_dirs[:, None, :] * c2w[:, :3, :3]).sum(dim=-1)
origins = torch.broadcast_to(c2w[:, :3, -1], directions.shape)
viewdirs = directions / torch.linalg.norm(
directions, dim=-1, keepdims=True
)
if self.training:
origins = torch.reshape(origins, (num_rays, 3))
viewdirs = torch.reshape(viewdirs, (num_rays, 3))
rgb = torch.reshape(rgb, (num_rays, 3))
else:
origins = torch.reshape(origins, (self.height, self.width, 3))
viewdirs = torch.reshape(viewdirs, (self.height, self.width, 3))
rgb = torch.reshape(rgb, (self.height, self.width, 3))
rays = Rays(origins=origins, viewdirs=viewdirs)
return {
"rgb": rgb, # [h, w, 3] or [num_rays, 3]
"rays": rays, # [h, w, 3] or [num_rays, 3]
}
class SubjectLoader(torch.utils.data.Dataset):
"""Single subject data loader for training and evaluation."""
SPLITS = ["train", "val", "trainval", "test"]
SUBJECT_IDS = [
"chair",
"drums",
"ficus",
"hotdog",
"lego",
"materials",
"mic",
"ship",
]
WIDTH, HEIGHT = 800, 800
NEAR, FAR = 2.0, 6.0
OPENGL_CAMERA = True
def __init__(
self,
subject_id: str,
root_fp: str,
split: str,
color_bkgd_aug: str = "white",
num_rays: int = None,
near: float = None,
far: float = None,
batch_over_images: bool = True,
device: torch.device = torch.device("cpu"),
):
super().__init__()
assert split in self.SPLITS, "%s" % split
assert subject_id in self.SUBJECT_IDS, "%s" % subject_id
assert color_bkgd_aug in ["white", "black", "random"]
self.split = split
self.num_rays = num_rays
self.near = self.NEAR if near is None else near
self.far = self.FAR if far is None else far
self.training = (num_rays is not None) and (
split in ["train", "trainval"]
)
self.color_bkgd_aug = color_bkgd_aug
self.batch_over_images = batch_over_images
if split == "trainval":
_images_train, _camtoworlds_train, _focal_train = _load_renderings(
root_fp, subject_id, "train"
)
_images_val, _camtoworlds_val, _focal_val = _load_renderings(
root_fp, subject_id, "val"
)
self.images = np.concatenate([_images_train, _images_val])
self.camtoworlds = np.concatenate(
[_camtoworlds_train, _camtoworlds_val]
)
self.focal = _focal_train
else:
self.images, self.camtoworlds, self.focal = _load_renderings(
root_fp, subject_id, split
)
self.images = torch.from_numpy(self.images).to(torch.uint8)
self.camtoworlds = torch.from_numpy(self.camtoworlds).to(torch.float32)
self.K = torch.tensor(
[
[self.focal, 0, self.WIDTH / 2.0],
[0, self.focal, self.HEIGHT / 2.0],
[0, 0, 1],
],
dtype=torch.float32,
) # (3, 3)
self.images = self.images.to(device)
self.camtoworlds = self.camtoworlds.to(device)
self.K = self.K.to(device)
assert self.images.shape[1:3] == (self.HEIGHT, self.WIDTH)
self.g = torch.Generator(device=device)
self.g.manual_seed(42)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
data = self.fetch_data(index)
data = self.preprocess(data)
return data
def preprocess(self, data):
"""Process the fetched / cached data with randomness."""
rgba, rays = data["rgba"], data["rays"]
pixels, alpha = torch.split(rgba, [3, 1], dim=-1)
if self.training:
if self.color_bkgd_aug == "random":
color_bkgd = torch.rand(
3, device=self.images.device, generator=self.g
)
elif self.color_bkgd_aug == "white":
color_bkgd = torch.ones(3, device=self.images.device)
elif self.color_bkgd_aug == "black":
color_bkgd = torch.zeros(3, device=self.images.device)
else:
# just use white during inference
color_bkgd = torch.ones(3, device=self.images.device)
pixels = pixels * alpha + color_bkgd * (1.0 - alpha)
return {
"pixels": pixels, # [n_rays, 3] or [h, w, 3]
"rays": rays, # [n_rays,] or [h, w]
"color_bkgd": color_bkgd, # [3,]
**{k: v for k, v in data.items() if k not in ["rgba", "rays"]},
}
def update_num_rays(self, num_rays):
self.num_rays = num_rays
def fetch_data(self, index):
"""Fetch the data (it maybe cached for multiple batches)."""
num_rays = self.num_rays
if self.training:
if self.batch_over_images:
image_id = torch.randint(
0,
len(self.images),
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
else:
image_id = [index] * num_rays
x = torch.randint(
0,
self.WIDTH,
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
y = torch.randint(
0,
self.HEIGHT,
size=(num_rays,),
device=self.images.device,
generator=self.g,
)
else:
image_id = [index]
x, y = torch.meshgrid(
torch.arange(self.WIDTH, device=self.images.device),
torch.arange(self.HEIGHT, device=self.images.device),
indexing="xy",
)
x = x.flatten()
y = y.flatten()
# generate rays
rgba = self.images[image_id, y, x] / 255.0 # (num_rays, 4)
c2w = self.camtoworlds[image_id] # (num_rays, 3, 4)
camera_dirs = F.pad(
torch.stack(
[
(x - self.K[0, 2] + 0.5) / self.K[0, 0],
(y - self.K[1, 2] + 0.5)
/ self.K[1, 1]
* (-1.0 if self.OPENGL_CAMERA else 1.0),
],
dim=-1,
),
(0, 1),
value=(-1.0 if self.OPENGL_CAMERA else 1.0),
) # [num_rays, 3]
# [num_rays, 3]
directions = (camera_dirs[:, None, :] * c2w[:, :3, :3]).sum(dim=-1)
origins = torch.broadcast_to(c2w[:, :3, -1], directions.shape)
viewdirs = directions / torch.linalg.norm(
directions, dim=-1, keepdims=True
)
if self.training:
origins = torch.reshape(origins, (num_rays, 3))
viewdirs = torch.reshape(viewdirs, (num_rays, 3))
rgba = torch.reshape(rgba, (num_rays, 4))
else:
origins = torch.reshape(origins, (self.HEIGHT, self.WIDTH, 3))
viewdirs = torch.reshape(viewdirs, (self.HEIGHT, self.WIDTH, 3))
rgba = torch.reshape(rgba, (self.HEIGHT, self.WIDTH, 4))
rays = Rays(origins=origins, viewdirs=viewdirs)
return {
"rgba": rgba, # [h, w, 4] or [num_rays, 4]
"rays": rays, # [h, w, 3] or [num_rays, 3]
}
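A compact, self-contained sketch of the pinhole ray generation used in `fetch_data` above (single pixel, identity pose; the focal length and principal point are illustrative values, not taken from the dataset):
import torch
import torch.nn.functional as F
K = torch.tensor([[800.0, 0.0, 400.0],
                  [0.0, 800.0, 400.0],
                  [0.0, 0.0, 1.0]])
c2w = torch.eye(4)[:3].unsqueeze(0)  # (1, 3, 4) identity camera-to-world pose
x = torch.tensor([400.0])
y = torch.tensor([400.0])
OPENGL_CAMERA = True
camera_dirs = F.pad(
    torch.stack(
        [
            (x - K[0, 2] + 0.5) / K[0, 0],
            (y - K[1, 2] + 0.5) / K[1, 1] * (-1.0 if OPENGL_CAMERA else 1.0),
        ],
        dim=-1,
    ),
    (0, 1),
    value=(-1.0 if OPENGL_CAMERA else 1.0),
)  # (1, 3); for the principal point this is roughly (0.0006, -0.0006, -1.0)
directions = (camera_dirs[:, None, :] * c2w[:, :3, :3]).sum(dim=-1)  # rotate into world
origins = torch.broadcast_to(c2w[:, :3, -1], directions.shape)
viewdirs = directions / torch.linalg.norm(directions, dim=-1, keepdim=True)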
class VDBEstimator(AbstractEstimator):
"""Occupancy Estimator Using A VDB."""
def __init__(self, init_grid: GridBatch, device="cuda:0") -> None:
super().__init__()
assert fVDB_ENABLED, "Please install fVDB to use this class."
assert len(init_grid) == 1, "Only support one grid for now."
# Create a mutable grid from the initial grid.
self.grid = sparse_grid_from_ijk(
init_grid.ijk,
voxel_sizes=init_grid.voxel_sizes,
origins=init_grid.origins,
mutable=True,
).to(device)
# The buffer for float occupancy values
self.occs = torch.nn.Parameter(
torch.zeros([self.grid.total_voxels], device=device),
requires_grad=False,
)
def state_dict(self):
state_dict = super().state_dict()
state_dict["grid"] = self.grid
state_dict["occs"] = self.occs.state_dict()
return state_dict
def load_state_dict(
self, state_dict: Mapping[str, Any], strict: bool = True
):
init_grid = state_dict["grid"]
self.grid = sparse_grid_from_ijk(
init_grid.ijk,
voxel_sizes=init_grid.voxel_sizes,
origins=init_grid.origins,
mutable=True,
)
remaining_state_dict = {
k: v for k, v in state_dict.items() if k not in ["grid", "occs"]
}
super().load_state_dict(remaining_state_dict, strict=strict)
def to(self, device: Union[str, torch.device]):
self.grid = self.grid.to(device)
self.occs = self.occs.to(device)
super().to(device)
return self
def sampling(
self,
# rays
rays_o: Tensor, # [n_rays, 3]
rays_d: Tensor, # [n_rays, 3]
# sigma/alpha function for skipping invisible space
sigma_fn: Optional[Callable] = None,
alpha_fn: Optional[Callable] = None,
near_plane: float = 0.0,
far_plane: float = 1e10,
t_min: Optional[Tensor] = None, # [n_rays]
t_max: Optional[Tensor] = None, # [n_rays]
# rendering options
render_step_size: float = 1e-3,
early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0,
stratified: bool = False,
cone_angle: float = 0.0,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Sampling with spatial skipping.
Note:
This function is not differentiable to any inputs.
Args:
rays_o: Ray origins of shape (n_rays, 3).
rays_d: Normalized ray directions of shape (n_rays, 3).
sigma_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the density along the ray with `sigma_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation density values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
alpha_fn: Optional. If provided, the marching will skip the invisible space
by evaluating the opacity along the ray with `alpha_fn`. It should be a
function that takes in samples {t_starts (N,), t_ends (N,),
ray indices (N,)} and returns the post-activation opacity values (N,).
You should only provide either `sigma_fn` or `alpha_fn`.
near_plane: Optional. Near plane distance. Default: 0.0.
far_plane: Optional. Far plane distance. Default: 1e10.
t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).
If provided, the marching will start from maximum of t_min and near_plane.
t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).
If provided, the marching will stop by minimum of t_max and far_plane.
render_step_size: Step size for marching. Default: 1e-3.
early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.
alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.
stratified: Whether to use stratified sampling. Default: False.
cone_angle: Cone angle for linearly-increased step size. 0. means
constant step size. Default: 0.0.
Returns:
A tuple of {LongTensor, Tensor, Tensor}:
- **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).
- **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).
- **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).
Examples:
.. code-block:: python
>>> ray_indices, t_starts, t_ends = grid.sampling(
>>> rays_o, rays_d, render_step_size=1e-3)
>>> t_mid = (t_starts + t_ends) / 2.0
>>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]
"""
near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)
far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)
if t_min is not None:
near_planes = torch.clamp(near_planes, min=t_min)
if t_max is not None:
far_planes = torch.clamp(far_planes, max=t_max)
if stratified:
near_planes += torch.rand_like(near_planes) * render_step_size
t_starts, t_ends, ray_indices = traverse_vdbs(
rays_o,
rays_d,
self.grid,
near_planes=near_planes,
far_planes=far_planes,
step_size=render_step_size,
cone_angle=cone_angle,
)
# skip invisible space
if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (
sigma_fn is not None or alpha_fn is not None
):
alpha_thre = min(alpha_thre, self.occs.mean().item())
n_rays = rays_o.shape[0]
# Compute visibility of the samples, and filter out invisible samples
if sigma_fn is not None:
if t_starts.shape[0] != 0:
sigmas = sigma_fn(t_starts, t_ends, ray_indices)
else:
sigmas = torch.empty((0,), device=t_starts.device)
assert (
sigmas.shape == t_starts.shape
), "sigmas must have shape of (N,)! Got {}".format(sigmas.shape)
masks = render_visibility_from_density(
t_starts=t_starts,
t_ends=t_ends,
sigmas=sigmas,
ray_indices=ray_indices,
n_rays=n_rays,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
elif alpha_fn is not None:
if t_starts.shape[0] != 0:
alphas = alpha_fn(t_starts, t_ends, ray_indices)
else:
alphas = torch.empty((0,), device=t_starts.device)
assert (
alphas.shape == t_starts.shape
), "alphas must have shape of (N,)! Got {}".format(alphas.shape)
masks = render_visibility_from_alpha(
alphas=alphas,
ray_indices=ray_indices,
n_rays=n_rays,
early_stop_eps=early_stop_eps,
alpha_thre=alpha_thre,
)
ray_indices, t_starts, t_ends = (
ray_indices[masks],
t_starts[masks],
t_ends[masks],
)
return ray_indices, t_starts, t_ends
def update_every_n_steps(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 1e-2,
ema_decay: float = 0.95,
warmup_steps: int = 256,
n: int = 16,
) -> None:
"""Update the estimator every n steps during training.
Args:
step: Current training step.
occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and
returns the occupancy values :math:`(N, 1)` at those locations.
occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.
ema_decay: The decay rate for EMA updates. Default: 0.95.
warmup_steps: Sample all cells during the warmup stage. After the warmup
stage we change the sampling strategy to 1/4 uniformly sampled cells
together with 1/4 occupied cells. Default: 256.
n: Update the grid every n steps. Default: 16.
"""
if not self.training:
raise RuntimeError(
"You should only call this function only during training. "
"Please call _update() directly if you want to update the "
"field during inference."
)
if step % n == 0 and self.training:
self._update(
step=step,
occ_eval_fn=occ_eval_fn,
occ_thre=occ_thre,
ema_decay=ema_decay,
warmup_steps=warmup_steps,
)
def _get_all_cells(self) -> List[Tensor]:
"""Returns all cells of the grid."""
return self.grid.ijk.jdata
def _sample_uniform_and_occupied_cells(self) -> List[Tensor]:
"""Samples both n uniform and occupied cells."""
n = self.grid.total_voxels // 4
uniform_selector = torch.randint(
0, self.grid.total_voxels, (n,), device=self.device
)
uniform_ijks = self.grid.ijk.jdata[uniform_selector]
occupied_ijks = self.grid.ijk_enabled.jdata
if n < len(occupied_ijks):
occupied_selector = torch.randint(
0, len(occupied_ijks), (n,), device=self.device
)
occupied_ijks = occupied_ijks[occupied_selector]
ijks = torch.cat([uniform_ijks, occupied_ijks], dim=0)
return ijks
def _update(
self,
step: int,
occ_eval_fn: Callable,
occ_thre: float = 0.01,
ema_decay: float = 0.95,
warmup_steps: int = 256,
) -> None:
"""Update the occ field in the EMA way."""
# sample cells
if step < warmup_steps:
ijks = self._get_all_cells()
else:
ijks = self._sample_uniform_and_occupied_cells()
# update the occ buffer
grid_coords = ijks - 0.5 + torch.rand_like(ijks, dtype=torch.float32)
x = self.grid.grid_to_world(grid_coords).jdata
occ = occ_eval_fn(x).squeeze(-1)
index = self.grid.ijk_to_index(ijks).jdata
self.occs[index] = torch.maximum(self.occs[index] * ema_decay, occ)
# update the grid
thre = torch.clamp(self.occs.mean(), max=occ_thre)
active = self.occs[index] >= thre
_ijks = ijks[active]
if len(_ijks) > 0:
self.grid.enable_ijk(_ijks)
_ijks = ijks[~active]
if len(_ijks) > 0:
self.grid.disable_ijk(_ijks)
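To make the EMA rule in `_update` concrete, here is a tiny hand-worked sketch; the numbers are illustrative assumptions, not values from the source:
occs_old = torch.tensor([0.50, 0.02, 0.00])
occ_new = torch.tensor([0.10, 0.30, 0.00])
occs = torch.maximum(occs_old * 0.95, occ_new)  # -> [0.475, 0.300, 0.000]
thre = torch.clamp(occs.mean(), max=1e-2)  # mean 0.258 clamped to occ_thre -> 0.01
active = occs >= thre  # -> [True, True, False]; active cells get enabled, the rest disabled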
def run(args):
device = "cuda:0"
set_random_seed(42)
if args.scene in MIPNERF360_UNBOUNDED_SCENES:
from datasets.nerf_360_v2 import SubjectLoader
# training parameters
max_steps = 20000
init_batch_size = 1024
target_sample_batch_size = 1 << 18
weight_decay = 0.0
# scene parameters
aabb = torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], device=device)
near_plane = 0.2
far_plane = 1.0e10
# dataset parameters
train_dataset_kwargs = {"color_bkgd_aug": "random", "factor": 4}
test_dataset_kwargs = {"factor": 4}
# model parameters
grid_resolution = 128
grid_nlvl = 4
# render parameters
render_step_size = 1e-3
alpha_thre = 1e-2
cone_angle = 0.004
else:
from datasets.nerf_synthetic import SubjectLoader
# training parameters
max_steps = 20000
init_batch_size = 1024
target_sample_batch_size = 1 << 18
weight_decay = (
1e-5 if args.scene in ["materials", "ficus", "drums"] else 1e-6
)
# scene parameters
aabb = torch.tensor([-1.5, -1.5, -1.5, 1.5, 1.5, 1.5], device=device)
near_plane = 0.0
far_plane = 1.0e10
# dataset parameters
train_dataset_kwargs = {}
test_dataset_kwargs = {}
# model parameters
grid_resolution = 128
grid_nlvl = 1
# render parameters
render_step_size = 5e-3
alpha_thre = 0.0
cone_angle = 0.0
train_dataset = SubjectLoader(
subject_id=args.scene,
root_fp=args.data_root,
split=args.train_split,
num_rays=init_batch_size,
device=device,
**train_dataset_kwargs,
)
test_dataset = SubjectLoader(
subject_id=args.scene,
root_fp=args.data_root,
split="test",
num_rays=None,
device=device,
**test_dataset_kwargs,
)
if args.vdb:
from fvdb import sparse_grid_from_dense
from nerfacc.estimators.vdb import VDBEstimator
assert grid_nlvl == 1, "VDBEstimator only supports grid_nlvl=1"
voxel_sizes = (aabb[3:] - aabb[:3]) / grid_resolution
origins = aabb[:3] + voxel_sizes / 2
grid = sparse_grid_from_dense(
1,
(grid_resolution, grid_resolution, grid_resolution),
voxel_sizes=voxel_sizes,
origins=origins,
)
estimator = VDBEstimator(grid).to(device)
estimator.aabbs = [aabb]
else:
estimator = OccGridEstimator(
roi_aabb=aabb, resolution=grid_resolution, levels=grid_nlvl
).to(device)
# setup the radiance field we want to train.
grad_scaler = torch.cuda.amp.GradScaler(2**10)
radiance_field = NGPRadianceField(aabb=estimator.aabbs[-1]).to(device)
optimizer = torch.optim.Adam(
radiance_field.parameters(),
lr=1e-2,
eps=1e-15,
weight_decay=weight_decay,
)
scheduler = torch.optim.lr_scheduler.ChainedScheduler(
[
torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=0.01, total_iters=100
),
torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=[
max_steps // 2,
max_steps * 3 // 4,
max_steps * 9 // 10,
],
gamma=0.33,
),
]
)
lpips_net = LPIPS(net="vgg").to(device)
lpips_norm_fn = lambda x: x[None, ...].permute(0, 3, 1, 2) * 2 - 1
lpips_fn = lambda x, y: lpips_net(lpips_norm_fn(x), lpips_norm_fn(y)).mean()
# training
tic = time.time()
for step in range(max_steps + 1):
radiance_field.train()
estimator.train()
i = torch.randint(0, len(train_dataset), (1,)).item()
data = train_dataset[i]
render_bkgd = data["color_bkgd"]
rays = data["rays"]
pixels = data["pixels"]
def occ_eval_fn(x):
density = radiance_field.query_density(x)
return density * render_step_size
# update occupancy grid
estimator.update_every_n_steps(
step=step,
occ_eval_fn=occ_eval_fn,
occ_thre=1e-2,
)
# render
rgb, acc, depth, n_rendering_samples = render_image_with_occgrid(
radiance_field,
estimator,
rays,
# rendering options
near_plane=near_plane,
render_step_size=render_step_size,
render_bkgd=render_bkgd,
cone_angle=cone_angle,
alpha_thre=alpha_thre,
)
if n_rendering_samples == 0:
continue
if target_sample_batch_size > 0:
# dynamic batch size for rays to keep sample batch size constant.
num_rays = len(pixels)
num_rays = int(
num_rays
* (target_sample_batch_size / float(n_rendering_samples))
)
train_dataset.update_num_rays(num_rays)
# compute loss
loss = F.smooth_l1_loss(rgb, pixels)
optimizer.zero_grad()
# do not unscale it because we are using Adam.
grad_scaler.scale(loss).backward()
optimizer.step()
scheduler.step()
if step % 10000 == 0:
elapsed_time = time.time() - tic
loss = F.mse_loss(rgb, pixels)
psnr = -10.0 * torch.log(loss) / np.log(10.0)
print(
f"elapsed_time={elapsed_time:.2f}s | step={step} | "
f"loss={loss:.5f} | psnr={psnr:.2f} | "
f"n_rendering_samples={n_rendering_samples:d} | num_rays={len(pixels):d} | "
f"max_depth={depth.max():.3f} | "
)
if step > 0 and step % max_steps == 0:
# evaluation
radiance_field.eval()
estimator.eval()
psnrs = []
lpips = []
with torch.no_grad():
for i in tqdm.tqdm(range(len(test_dataset))):
data = test_dataset[i]
render_bkgd = data["color_bkgd"]
rays = data["rays"]
pixels = data["pixels"]
# rendering
# rgb, acc, depth, _ = render_image_with_occgrid_test(
# 1024,
# # scene
# radiance_field,
# estimator,
# rays,
# # rendering options
# near_plane=near_plane,
# render_step_size=render_step_size,
# render_bkgd=render_bkgd,
# cone_angle=cone_angle,
# alpha_thre=alpha_thre,
# )
rgb, acc, depth, _ = render_image_with_occgrid(
radiance_field,
estimator,
rays,
# rendering options
near_plane=near_plane,
render_step_size=render_step_size,
render_bkgd=render_bkgd,
cone_angle=cone_angle,
alpha_thre=alpha_thre,
)
mse = F.mse_loss(rgb, pixels)
psnr = -10.0 * torch.log(mse) / np.log(10.0)
psnrs.append(psnr.item())
lpips.append(lpips_fn(rgb, pixels).item())
# if i == 0:
# imageio.imwrite(
# "rgb_test.png",
# (rgb.cpu().numpy() * 255).astype(np.uint8),
# )
# imageio.imwrite(
# "rgb_error.png",
# (
# (rgb - pixels).norm(dim=-1).cpu().numpy() * 255
# ).astype(np.uint8),
# )
psnr_avg = sum(psnrs) / len(psnrs)
lpips_avg = sum(lpips) / len(lpips)
print(f"evaluation: psnr_avg={psnr_avg}, lpips_avg={lpips_avg}") | null |
189,547 | import collections
import os
import sys
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from .utils import Rays
from scene_manager import SceneManager
def _load_colmap(root_fp: str, subject_id: str, factor: int = 1):
assert factor in [1, 2, 4, 8]
data_dir = os.path.join(root_fp, subject_id)
colmap_dir = os.path.join(data_dir, "sparse/0/")
manager = SceneManager(colmap_dir)
manager.load_cameras()
manager.load_images()
# Assume shared intrinsics between all cameras.
cam = manager.cameras[1]
fx, fy, cx, cy = cam.fx, cam.fy, cam.cx, cam.cy
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
K[:2, :] /= factor
# Extract extrinsic matrices in world-to-camera format.
imdata = manager.images
w2c_mats = []
bottom = np.array([0, 0, 0, 1]).reshape(1, 4)
for k in imdata:
im = imdata[k]
rot = im.R()
trans = im.tvec.reshape(3, 1)
w2c = np.concatenate([np.concatenate([rot, trans], 1), bottom], axis=0)
w2c_mats.append(w2c)
w2c_mats = np.stack(w2c_mats, axis=0)
# Convert extrinsics to camera-to-world.
camtoworlds = np.linalg.inv(w2c_mats)
# Image names from COLMAP. No need for permuting the poses according to
# image names anymore.
image_names = [imdata[k].name for k in imdata]
# # Switch from COLMAP (right, down, fwd) to Nerf (right, up, back) frame.
# poses = poses @ np.diag([1, -1, -1, 1])
# Get distortion parameters.
type_ = cam.camera_type
if type_ == 0 or type_ == "SIMPLE_PINHOLE":
params = None
camtype = "perspective"
elif type_ == 1 or type_ == "PINHOLE":
params = None
camtype = "perspective"
if type_ == 2 or type_ == "SIMPLE_RADIAL":
params = {k: 0.0 for k in ["k1", "k2", "k3", "p1", "p2"]}
params["k1"] = cam.k1
camtype = "perspective"
elif type_ == 3 or type_ == "RADIAL":
params = {k: 0.0 for k in ["k1", "k2", "k3", "p1", "p2"]}
params["k1"] = cam.k1
params["k2"] = cam.k2
camtype = "perspective"
elif type_ == 4 or type_ == "OPENCV":
params = {k: 0.0 for k in ["k1", "k2", "k3", "p1", "p2"]}
params["k1"] = cam.k1
params["k2"] = cam.k2
params["p1"] = cam.p1
params["p2"] = cam.p2
camtype = "perspective"
elif type_ == 5 or type_ == "OPENCV_FISHEYE":
params = {k: 0.0 for k in ["k1", "k2", "k3", "k4"]}
params["k1"] = cam.k1
params["k2"] = cam.k2
params["k3"] = cam.k3
params["k4"] = cam.k4
camtype = "fisheye"
assert params is None, "Only support pinhole camera model."
# Previous Nerf results were generated with images sorted by filename,
# ensure metrics are reported on the same test set.
inds = np.argsort(image_names)
image_names = [image_names[i] for i in inds]
camtoworlds = camtoworlds[inds]
# Load images.
if factor > 1:
image_dir_suffix = f"_{factor}"
else:
image_dir_suffix = ""
colmap_image_dir = os.path.join(data_dir, "images")
image_dir = os.path.join(data_dir, "images" + image_dir_suffix)
for d in [image_dir, colmap_image_dir]:
if not os.path.exists(d):
raise ValueError(f"Image folder {d} does not exist.")
# Downsampled images may have different names vs images used for COLMAP,
# so we need to map between the two sorted lists of files.
colmap_files = sorted(os.listdir(colmap_image_dir))
image_files = sorted(os.listdir(image_dir))
colmap_to_image = dict(zip(colmap_files, image_files))
image_paths = [
os.path.join(image_dir, colmap_to_image[f]) for f in image_names
]
print("loading images")
images = [imageio.imread(x) for x in tqdm.tqdm(image_paths)]
images = np.stack(images, axis=0)
# Select the split.
all_indices = np.arange(images.shape[0])
split_indices = {
"test": all_indices[all_indices % 8 == 0],
"train": all_indices[all_indices % 8 != 0],
}
return images, camtoworlds, K, split_indices | null |
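A minimal usage sketch for `_load_colmap`; the root path and subject id are illustrative assumptions:
images, camtoworlds, K, split_indices = _load_colmap("data/360_v2", "garden", factor=4)
train_images = images[split_indices["train"]]  # (N_train, H, W, 3) uint8
train_c2w = camtoworlds[split_indices["train"]]  # (N_train, 4, 4) camera-to-world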
189,548 | import collections
import os
import sys
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from .utils import Rays
from scene_manager import SceneManager
The provided code snippet includes necessary dependencies for implementing the `similarity_from_cameras` function. Write a Python function `def similarity_from_cameras(c2w, strict_scaling)` to solve the following problem:
reference: nerf-factory Get a similarity transform to normalize dataset from c2w (OpenCV convention) cameras :param c2w: (N, 4) :return T (4,4) , scale (float)
Here is the function:
def similarity_from_cameras(c2w, strict_scaling):
"""
reference: nerf-factory
Get a similarity transform to normalize dataset
from c2w (OpenCV convention) cameras
:param c2w: (N, 4)
:return T (4,4) , scale (float)
"""
t = c2w[:, :3, 3]
R = c2w[:, :3, :3]
# (1) Rotate the world so that z+ is the up axis
# we estimate the up axis by averaging the camera up axes
ups = np.sum(R * np.array([0, -1.0, 0]), axis=-1)
world_up = np.mean(ups, axis=0)
world_up /= np.linalg.norm(world_up)
up_camspace = np.array([0.0, -1.0, 0.0])
c = (up_camspace * world_up).sum()
cross = np.cross(world_up, up_camspace)
skew = np.array(
[
[0.0, -cross[2], cross[1]],
[cross[2], 0.0, -cross[0]],
[-cross[1], cross[0], 0.0],
]
)
if c > -1:
R_align = np.eye(3) + skew + (skew @ skew) * 1 / (1 + c)
else:
# In the unlikely case the original data has y+ up axis,
# rotate 180-deg about x axis
R_align = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
# R_align = np.eye(3) # DEBUG
R = R_align @ R
fwds = np.sum(R * np.array([0, 0.0, 1.0]), axis=-1)
t = (R_align @ t[..., None])[..., 0]
# (2) Recenter the scene using camera center rays
# find the closest point to the origin for each camera's center ray
nearest = t + (fwds * -t).sum(-1)[:, None] * fwds
# median for more robustness
translate = -np.median(nearest, axis=0)
# translate = -np.mean(t, axis=0) # DEBUG
transform = np.eye(4)
transform[:3, 3] = translate
transform[:3, :3] = R_align
# (3) Rescale the scene using camera distances
scale_fn = np.max if strict_scaling else np.median
scale = 1.0 / scale_fn(np.linalg.norm(t + translate, axis=-1))
return transform, scale | reference: nerf-factory Get a similarity transform to normalize dataset from c2w (OpenCV convention) cameras :param c2w: (N, 4) :return T (4,4) , scale (float) |
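A hedged sketch of applying the returned similarity transform to camera-to-world poses, following the usual convention for such transforms; this is not taken from the source:
T, scale = similarity_from_cameras(camtoworlds, strict_scaling=False)
camtoworlds = T @ camtoworlds  # rotate/recenter all (4, 4) poses
camtoworlds[:, :3, 3] *= scale  # rescale the translations only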
189,549 | import collections
import json
import os
import imageio.v2 as imageio
import numpy as np
import torch
import torch.nn.functional as F
from .utils import Rays
The provided code snippet includes necessary dependencies for implementing the `_load_renderings` function. Write a Python function `def _load_renderings(root_fp: str, subject_id: str, split: str)` to solve the following problem:
Load images from disk.
Here is the function:
def _load_renderings(root_fp: str, subject_id: str, split: str):
"""Load images from disk."""
if not root_fp.startswith("/"):
# allow relative path. e.g., "./data/nerf_synthetic/"
root_fp = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"..",
root_fp,
)
data_dir = os.path.join(root_fp, subject_id)
with open(
os.path.join(data_dir, "transforms_{}.json".format(split)), "r"
) as fp:
meta = json.load(fp)
images = []
camtoworlds = []
for i in range(len(meta["frames"])):
frame = meta["frames"][i]
fname = os.path.join(data_dir, frame["file_path"] + ".png")
rgba = imageio.imread(fname)
camtoworlds.append(frame["transform_matrix"])
images.append(rgba)
images = np.stack(images, axis=0)
camtoworlds = np.stack(camtoworlds, axis=0)
h, w = images.shape[1:3]
camera_angle_x = float(meta["camera_angle_x"])
focal = 0.5 * w / np.tan(0.5 * camera_angle_x)
return images, camtoworlds, focal | Load images from disk. |
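A hedged usage sketch; the root path and subject id are assumptions, and the intrinsics matrix is the standard pinhole construction from the returned focal length:
images, camtoworlds, focal = _load_renderings("data/nerf_synthetic", "lego", "train")
h, w = images.shape[1:3]
K = np.array([[focal, 0, w / 2.0], [0, focal, h / 2.0], [0, 0, 1.0]])  # shared pinhole intrinsics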
189,550 | import json
import os
import imageio.v2 as imageio
import numpy as np
import torch
import torch.nn.functional as F
from .utils import Rays
The provided code snippet includes necessary dependencies for implementing the `_load_renderings` function. Write a Python function `def _load_renderings(root_fp: str, subject_id: str, split: str)` to solve the following problem:
Load images from disk.
Here is the function:
def _load_renderings(root_fp: str, subject_id: str, split: str):
"""Load images from disk."""
if not root_fp.startswith("/"):
# allow relative path. e.g., "./data/dnerf_synthetic/"
root_fp = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..",
"..",
root_fp,
)
data_dir = os.path.join(root_fp, subject_id)
with open(
os.path.join(data_dir, "transforms_{}.json".format(split)), "r"
) as fp:
meta = json.load(fp)
images = []
camtoworlds = []
timestamps = []
for i in range(len(meta["frames"])):
frame = meta["frames"][i]
fname = os.path.join(data_dir, frame["file_path"] + ".png")
rgba = imageio.imread(fname)
timestamp = (
frame["time"]
if "time" in frame
else float(i) / (len(meta["frames"]) - 1)
)
timestamps.append(timestamp)
camtoworlds.append(frame["transform_matrix"])
images.append(rgba)
images = np.stack(images, axis=0)
camtoworlds = np.stack(camtoworlds, axis=0)
timestamps = np.stack(timestamps, axis=0)
h, w = images.shape[1:3]
camera_angle_x = float(meta["camera_angle_x"])
focal = 0.5 * w / np.tan(0.5 * camera_angle_x)
return images, camtoworlds, focal, timestamps | Load images from disk. |
189,551 | import glob
import json
import os
import shutil
from subprocess import DEVNULL, call
from rich.console import Console
from torch.utils.cpp_extension import _get_build_directory, load
The provided code snippet includes necessary dependencies for implementing the `cuda_toolkit_available` function. Write a Python function `def cuda_toolkit_available()` to solve the following problem:
Check if nvcc is available on the machine.
Here is the function:
def cuda_toolkit_available():
"""Check if the nvcc is avaiable on the machine."""
try:
call(["nvcc"], stdout=DEVNULL, stderr=DEVNULL)
return True
except FileNotFoundError:
return False | Check if the nvcc is avaiable on the machine. |
189,552 | import glob
import json
import os
import shutil
from subprocess import DEVNULL, call
from rich.console import Console
from torch.utils.cpp_extension import _get_build_directory, load
The provided code snippet includes necessary dependencies for implementing the `cuda_toolkit_version` function. Write a Python function `def cuda_toolkit_version()` to solve the following problem:
Get the cuda toolkit version.
Here is the function:
def cuda_toolkit_version():
"""Get the cuda toolkit version."""
cuda_home = os.path.join(os.path.dirname(shutil.which("nvcc")), "..")
if os.path.exists(os.path.join(cuda_home, "version.txt")):
with open(os.path.join(cuda_home, "version.txt")) as f:
cuda_version = f.read().strip().split()[-1]
elif os.path.exists(os.path.join(cuda_home, "version.json")):
with open(os.path.join(cuda_home, "version.json")) as f:
cuda_version = json.load(f)["cuda"]["version"]
else:
raise RuntimeError("Cannot find the cuda version.")
return cuda_version | Get the cuda toolkit version. |
189,553 | from typing import Tuple, Union
import torch
from torch import Tensor
from . import cuda as _C
from .data_specs import RayIntervals, RaySamples
class RaySamples:
"""Ray samples that supports batched and flattened data.
Note:
When `vals` is flattened, either `packed_info` or `ray_indices` must
be provided.
Args:
vals: Batched data with shape (n_rays, n_samples) or flattened data
with shape (all_samples,)
packed_info: Optional. A tensor of shape (n_rays, 2) that specifies
the start and count of each chunk in flattened `vals`, with in
total n_rays chunks. Only needed when `vals` is flattened.
ray_indices: Optional. A tensor of shape (all_samples,) that specifies
the ray index of each sample. Only needed when `vals` is flattened.
Examples:
.. code-block:: python
>>> # Batched data
>>> ray_samples = RaySamples(torch.rand(10, 100))
>>> # Flattened data
>>> ray_samples = RaySamples(
>>> torch.rand(1000),
>>> packed_info=torch.tensor([[0, 100], [100, 200], [300, 700]]),
>>> )
"""
vals: torch.Tensor
packed_info: Optional[torch.Tensor] = None
ray_indices: Optional[torch.Tensor] = None
is_valid: Optional[torch.Tensor] = None
def _to_cpp(self):
"""
Generate object to pass to C++
"""
spec = _C.RaySegmentsSpec()
spec.vals = self.vals.contiguous()
if self.packed_info is not None:
spec.chunk_starts = self.packed_info[:, 0].contiguous()
if self.packed_info is not None:
spec.chunk_cnts = self.packed_info[:, 1].contiguous()
if self.ray_indices is not None:
spec.ray_indices = self.ray_indices.contiguous()
return spec
def _from_cpp(cls, spec):
"""
Generate object from C++
"""
if spec.chunk_starts is not None and spec.chunk_cnts is not None:
packed_info = torch.stack([spec.chunk_starts, spec.chunk_cnts], -1)
else:
packed_info = None
ray_indices = spec.ray_indices
if spec.is_valid is not None:
is_valid = spec.is_valid
else:
is_valid = None
vals = spec.vals
return cls(
vals=vals,
packed_info=packed_info,
ray_indices=ray_indices,
is_valid=is_valid,
)
def device(self) -> torch.device:
return self.vals.device
class RayIntervals:
"""Ray intervals that supports batched and flattened data.
Each interval is defined by two edges (left and right). The attribute `vals`
stores the edges of all intervals along the rays. The attributes `is_left`
and `is_right` are for indicating whether each edge is a left or right edge.
This class unifies the representation of both continuous and non-continuous ray
intervals.
Note:
When `vals` is flattened, either `packed_info` or `ray_indices` must
be provided. Also both `is_left` and `is_right` must be provided.
Args:
vals: Batched data with shape (n_rays, n_edges) or flattened data
with shape (all_edges,)
packed_info: Optional. A tensor of shape (n_rays, 2) that specifies
the start and count of each chunk in flattened `vals`, with in
total n_rays chunks. Only needed when `vals` is flattened.
ray_indices: Optional. A tensor of shape (all_edges,) that specifies
the ray index of each edge. Only needed when `vals` is flattened.
is_left: Optional. A boolean tensor of shape (all_edges,) that specifies
whether each edge is a left edge. Only needed when `vals` is flattened.
is_right: Optional. A boolean tensor of shape (all_edges,) that specifies
whether each edge is a right edge. Only needed when `vals` is flattened.
Examples:
.. code-block:: python
>>> # Batched data
>>> ray_intervals = RayIntervals(torch.rand(10, 100))
>>> # Flattened data
>>> ray_intervals = RayIntervals(
>>> torch.rand(6),
>>> packed_info=torch.tensor([[0, 2], [2, 0], [2, 4]]),
>>> is_left=torch.tensor([True, False, True, True, True, False]),
>>> is_right=torch.tensor([False, True, False, True, True, True]),
>>> )
"""
vals: torch.Tensor
packed_info: Optional[torch.Tensor] = None
ray_indices: Optional[torch.Tensor] = None
is_left: Optional[torch.Tensor] = None
is_right: Optional[torch.Tensor] = None
def _to_cpp(self):
"""
Generate object to pass to C++
"""
spec = _C.RaySegmentsSpec()
spec.vals = self.vals.contiguous()
if self.packed_info is not None:
spec.chunk_starts = self.packed_info[:, 0].contiguous()
if self.packed_info is not None:
spec.chunk_cnts = self.packed_info[:, 1].contiguous()
if self.ray_indices is not None:
spec.ray_indices = self.ray_indices.contiguous()
if self.is_left is not None:
spec.is_left = self.is_left.contiguous()
if self.is_right is not None:
spec.is_right = self.is_right.contiguous()
return spec
def _from_cpp(cls, spec):
"""
Generate object from C++
"""
if spec.chunk_starts is not None and spec.chunk_cnts is not None:
packed_info = torch.stack([spec.chunk_starts, spec.chunk_cnts], -1)
else:
packed_info = None
ray_indices = spec.ray_indices
is_left = spec.is_left
is_right = spec.is_right
return cls(
vals=spec.vals,
packed_info=packed_info,
ray_indices=ray_indices,
is_left=is_left,
is_right=is_right,
)
def device(self) -> torch.device:
return self.vals.device
The provided code snippet includes necessary dependencies for implementing the `importance_sampling` function. Write a Python function `def importance_sampling( intervals: RayIntervals, cdfs: Tensor, n_intervals_per_ray: Union[Tensor, int], stratified: bool = False, ) -> Tuple[RayIntervals, RaySamples]` to solve the following problem:
Importance sampling that supports flattened tensor. Given a set of intervals and the corresponding CDFs at the interval edges, this function performs inverse transform sampling to create a new set of intervals and samples. Stratified sampling is also supported. Args: intervals: A :class:`RayIntervals` object that specifies the edges of the intervals along the rays. cdfs: The CDFs at the interval edges. It has the same shape as `intervals.vals`. n_intervals_per_ray: Resample each ray to have this many intervals. If it is a tensor, it must be of shape (n_rays,). If it is an int, it is broadcasted to all rays. stratified: If True, perform stratified sampling. Returns: A tuple of {:class:`RayIntervals`, :class:`RaySamples`}: - **intervals**: A :class:`RayIntervals` object. If `n_intervals_per_ray` is an int, \ `intervals.vals` will has the shape of (n_rays, n_intervals_per_ray + 1). \ If `n_intervals_per_ray` is a tensor, we assume each ray results \ in a different number of intervals. In this case, `intervals.vals` \ will has the shape of (all_edges,), the attributes `packed_info`, \ `ray_indices`, `is_left` and `is_right` will be accessable. - **samples**: A :class:`RaySamples` object. If `n_intervals_per_ray` is an int, \ `samples.vals` will has the shape of (n_rays, n_intervals_per_ray). \ If `n_intervals_per_ray` is a tensor, we assume each ray results \ in a different number of intervals. In this case, `samples.vals` \ will has the shape of (all_samples,), the attributes `packed_info` and \ `ray_indices` will be accessable. Example: .. code-block:: python >>> intervals = RayIntervals( ... vals=torch.tensor([0.0, 1.0, 0.0, 1.0, 2.0], device="cuda"), ... packed_info=torch.tensor([[0, 2], [2, 3]], device="cuda"), ... ) >>> cdfs = torch.tensor([0.0, 0.5, 0.0, 0.5, 1.0], device="cuda") >>> n_intervals_per_ray = 2 >>> intervals, samples = importance_sampling(intervals, cdfs, n_intervals_per_ray) >>> intervals.vals tensor([[0.0000, 0.5000, 1.0000], [0.0000, 1.0000, 2.0000]], device='cuda:0') >>> samples.vals tensor([[0.2500, 0.7500], [0.5000, 1.5000]], device='cuda:0')
Here is the function:
def importance_sampling(
intervals: RayIntervals,
cdfs: Tensor,
n_intervals_per_ray: Union[Tensor, int],
stratified: bool = False,
) -> Tuple[RayIntervals, RaySamples]:
"""Importance sampling that supports flattened tensor.
Given a set of intervals and the corresponding CDFs at the interval edges,
this function performs inverse transform sampling to create a new set of
intervals and samples. Stratified sampling is also supported.
Args:
intervals: A :class:`RayIntervals` object that specifies the edges of the
intervals along the rays.
cdfs: The CDFs at the interval edges. It has the same shape as
`intervals.vals`.
n_intervals_per_ray: Resample each ray to have this many intervals.
If it is a tensor, it must be of shape (n_rays,). If it is an int,
it is broadcasted to all rays.
stratified: If True, perform stratified sampling.
Returns:
A tuple of {:class:`RayIntervals`, :class:`RaySamples`}:
- **intervals**: A :class:`RayIntervals` object. If `n_intervals_per_ray` is an int, \
`intervals.vals` will have the shape of (n_rays, n_intervals_per_ray + 1). \
If `n_intervals_per_ray` is a tensor, we assume each ray results \
in a different number of intervals. In this case, `intervals.vals` \
will have the shape of (all_edges,), the attributes `packed_info`, \
`ray_indices`, `is_left` and `is_right` will be accessible.
- **samples**: A :class:`RaySamples` object. If `n_intervals_per_ray` is an int, \
`samples.vals` will have the shape of (n_rays, n_intervals_per_ray). \
If `n_intervals_per_ray` is a tensor, we assume each ray results \
in a different number of intervals. In this case, `samples.vals` \
will have the shape of (all_samples,), the attributes `packed_info` and \
`ray_indices` will be accessible.
Example:
.. code-block:: python
>>> intervals = RayIntervals(
... vals=torch.tensor([0.0, 1.0, 0.0, 1.0, 2.0], device="cuda"),
... packed_info=torch.tensor([[0, 2], [2, 3]], device="cuda"),
... )
>>> cdfs = torch.tensor([0.0, 0.5, 0.0, 0.5, 1.0], device="cuda")
>>> n_intervals_per_ray = 2
>>> intervals, samples = importance_sampling(intervals, cdfs, n_intervals_per_ray)
>>> intervals.vals
tensor([[0.0000, 0.5000, 1.0000],
[0.0000, 1.0000, 2.0000]], device='cuda:0')
>>> samples.vals
tensor([[0.2500, 0.7500],
[0.5000, 1.5000]], device='cuda:0')
"""
if isinstance(n_intervals_per_ray, Tensor):
n_intervals_per_ray = n_intervals_per_ray.contiguous()
intervals, samples = _C.importance_sampling(
intervals._to_cpp(),
cdfs.contiguous(),
n_intervals_per_ray,
stratified,
)
return RayIntervals._from_cpp(intervals), RaySamples._from_cpp(samples) | Importance sampling that supports flattened tensor. Given a set of intervals and the corresponding CDFs at the interval edges, this function performs inverse transform sampling to create a new set of intervals and samples. Stratified sampling is also supported. Args: intervals: A :class:`RayIntervals` object that specifies the edges of the intervals along the rays. cdfs: The CDFs at the interval edges. It has the same shape as `intervals.vals`. n_intervals_per_ray: Resample each ray to have this many intervals. If it is a tensor, it must be of shape (n_rays,). If it is an int, it is broadcasted to all rays. stratified: If True, perform stratified sampling. Returns: A tuple of {:class:`RayIntervals`, :class:`RaySamples`}: - **intervals**: A :class:`RayIntervals` object. If `n_intervals_per_ray` is an int, \ `intervals.vals` will has the shape of (n_rays, n_intervals_per_ray + 1). \ If `n_intervals_per_ray` is a tensor, we assume each ray results \ in a different number of intervals. In this case, `intervals.vals` \ will has the shape of (all_edges,), the attributes `packed_info`, \ `ray_indices`, `is_left` and `is_right` will be accessable. - **samples**: A :class:`RaySamples` object. If `n_intervals_per_ray` is an int, \ `samples.vals` will has the shape of (n_rays, n_intervals_per_ray). \ If `n_intervals_per_ray` is a tensor, we assume each ray results \ in a different number of intervals. In this case, `samples.vals` \ will has the shape of (all_samples,), the attributes `packed_info` and \ `ray_indices` will be accessable. Example: .. code-block:: python >>> intervals = RayIntervals( ... vals=torch.tensor([0.0, 1.0, 0.0, 1.0, 2.0], device="cuda"), ... packed_info=torch.tensor([[0, 2], [2, 3]], device="cuda"), ... ) >>> cdfs = torch.tensor([0.0, 0.5, 0.0, 0.5, 1.0], device="cuda") >>> n_intervals_per_ray = 2 >>> intervals, samples = importance_sampling(intervals, cdfs, n_intervals_per_ray) >>> intervals.vals tensor([[0.0000, 0.5000, 1.0000], [0.0000, 1.0000, 2.0000]], device='cuda:0') >>> samples.vals tensor([[0.2500, 0.7500], [0.5000, 1.5000]], device='cuda:0') |
189,554 | from typing import Tuple, Union
import torch
from torch import Tensor
from . import cuda as _C
from .data_specs import RayIntervals, RaySamples
def searchsorted(
sorted_sequence: Union[RayIntervals, RaySamples],
values: Union[RayIntervals, RaySamples],
) -> Tuple[Tensor, Tensor]:
"""Searchsorted that supports flattened tensor.
This function returns {`ids_left`, `ids_right`} such that:
`sorted_sequence.vals.gather(-1, ids_left) <= values.vals < sorted_sequence.vals.gather(-1, ids_right)`
Note:
When a value is out of the range of sorted_sequence, we return the
corresponding ids as if that value were clipped to the range of
sorted_sequence. See the example below.
Args:
sorted_sequence: A :class:`RayIntervals` or :class:`RaySamples` object. We assume
the `sorted_sequence.vals` is sorted in ascending order for each ray.
values: A :class:`RayIntervals` or :class:`RaySamples` object.
Returns:
A tuple of LongTensor:
- **ids_left**: A LongTensor with the same shape as `values.vals`.
- **ids_right**: A LongTensor with the same shape as `values.vals`.
Example:
>>> sorted_sequence = RayIntervals(
... vals=torch.tensor([0.0, 1.0, 0.0, 1.0, 2.0], device="cuda"),
... packed_info=torch.tensor([[0, 2], [2, 3]], device="cuda"),
... )
>>> values = RayIntervals(
... vals=torch.tensor([0.5, 1.5, 2.5], device="cuda"),
... packed_info=torch.tensor([[0, 1], [1, 2]], device="cuda"),
... )
>>> ids_left, ids_right = searchsorted(sorted_sequence, values)
>>> ids_left
tensor([0, 3, 3], device='cuda:0')
>>> ids_right
tensor([1, 4, 4], device='cuda:0')
>>> sorted_sequence.vals.gather(-1, ids_left)
tensor([0., 1., 1.], device='cuda:0')
>>> sorted_sequence.vals.gather(-1, ids_right)
tensor([1., 2., 2.], device='cuda:0')
"""
ids_left, ids_right = _C.searchsorted(
values._to_cpp(), sorted_sequence._to_cpp()
)
return ids_left, ids_right
The provided code snippet includes necessary dependencies for implementing the `_sample_from_weighted` function. Write a Python function `def _sample_from_weighted( bins: Tensor, weights: Tensor, num_samples: int, stratified: bool = False, vmin: float = -torch.inf, vmax: float = torch.inf, ) -> Tuple[Tensor, Tensor]` to solve the following problem:
Args: bins: (..., B + 1). weights: (..., B). Returns: samples: (..., S + 1).
Here is the function:
def _sample_from_weighted(
bins: Tensor,
weights: Tensor,
num_samples: int,
stratified: bool = False,
vmin: float = -torch.inf,
vmax: float = torch.inf,
) -> Tuple[Tensor, Tensor]:
"""
Args:
bins: (..., B + 1).
weights: (..., B).
Returns:
samples: (..., S + 1).
"""
import torch.nn.functional as F
B = weights.shape[-1]
S = num_samples
assert bins.shape[-1] == B + 1
dtype, device = bins.dtype, bins.device
eps = torch.finfo(weights.dtype).eps
# (..., B).
pdf = F.normalize(weights, p=1, dim=-1)
# (..., B + 1).
cdf = torch.cat(
[
torch.zeros_like(pdf[..., :1]),
torch.cumsum(pdf[..., :-1], dim=-1),
torch.ones_like(pdf[..., :1]),
],
dim=-1,
)
# (..., S). Sample positions between [0, 1).
if not stratified:
pad = 1 / (2 * S)
# Get the center of each pdf bins.
u = torch.linspace(pad, 1 - pad - eps, S, dtype=dtype, device=device)
u = u.broadcast_to(bins.shape[:-1] + (S,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / S
max_jitter = (1 - u_max) / (S - 1) - eps
# Only perform one jittering per ray (`single_jitter` in the original
# implementation.)
u = (
torch.linspace(0, 1 - u_max, S, dtype=dtype, device=device)
+ torch.rand(
*bins.shape[:-1],
1,
dtype=dtype,
device=device,
)
* max_jitter
)
# (..., S).
ceil = torch.searchsorted(cdf.contiguous(), u.contiguous(), side="right")
floor = ceil - 1
# (..., S * 2).
inds = torch.cat([floor, ceil], dim=-1)
# (..., S).
cdf0, cdf1 = cdf.gather(-1, inds).split(S, dim=-1)
b0, b1 = bins.gather(-1, inds).split(S, dim=-1)
# (..., S). Linear interpolation in 1D.
t = (u - cdf0) / torch.clamp(cdf1 - cdf0, min=eps)
# Sample centers.
centers = b0 + t * (b1 - b0)
samples = (centers[..., 1:] + centers[..., :-1]) / 2
samples = torch.cat(
[
(2 * centers[..., :1] - samples[..., :1]).clamp_min(vmin),
samples,
(2 * centers[..., -1:] - samples[..., -1:]).clamp_max(vmax),
],
dim=-1,
)
return samples, centers | Args: bins: (..., B + 1). weights: (..., B). Returns: samples: (..., S + 1). |
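A small sketch of calling `_sample_from_weighted`; shapes follow the docstring and the numbers are illustrative:
bins = torch.linspace(0.0, 1.0, 9).expand(4, 9)  # (..., B + 1) with B = 8
weights = torch.rand(4, 8)  # (..., B)
samples, centers = _sample_from_weighted(bins, weights, num_samples=16, stratified=True, vmin=0.0, vmax=1.0)
# samples: (4, 17) resampled bin edges, centers: (4, 16) sample centers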
189,555 | from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from . import cuda as _C
The provided code snippet includes necessary dependencies for implementing the `opencv_lens_undistortion` function. Write a Python function `def opencv_lens_undistortion( uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10 ) -> Tensor` to solve the following problem:
Undistort the opencv distortion. Note: This function is not differentiable to any inputs. Args: uv: (..., 2) UV coordinates. params: (..., N) or (N) OpenCV distortion parameters. We support N = 0, 1, 2, 4, 8. If N = 0, we return the input uv directly. If N = 1, we assume the input is {k1}. If N = 2, we assume the input is {k1, k2}. If N = 4, we assume the input is {k1, k2, p1, p2}. If N = 8, we assume the input is {k1, k2, p1, p2, k3, k4, k5, k6}. Returns: (..., 2) undistorted UV coordinates.
Here is the function:
def opencv_lens_undistortion(
uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10
) -> Tensor:
"""Undistort the opencv distortion.
Note:
This function is not differentiable to any inputs.
Args:
uv: (..., 2) UV coordinates.
params: (..., N) or (N) OpenCV distortion parameters. We support
N = 0, 1, 2, 4, 8. If N = 0, we return the input uv directly.
If N = 1, we assume the input is {k1}. If N = 2, we assume the
input is {k1, k2}. If N = 4, we assume the input is {k1, k2, p1, p2}.
If N = 8, we assume the input is {k1, k2, p1, p2, k3, k4, k5, k6}.
Returns:
(..., 2) undistorted UV coordinates.
"""
assert uv.shape[-1] == 2
assert params.shape[-1] in [0, 1, 2, 4, 8]
if params.shape[-1] == 0:
return uv
elif params.shape[-1] < 8:
params = F.pad(params, (0, 8 - params.shape[-1]), "constant", 0)
assert params.shape[-1] == 8
batch_shape = uv.shape[:-1]
params = torch.broadcast_to(params, batch_shape + (params.shape[-1],))
return _C.opencv_lens_undistortion(
uv.contiguous(), params.contiguous(), eps, iters
) | Undistort the opencv distortion. Note: This function is not differentiable to any inputs. Args: uv: (..., 2) UV coordinates. params: (..., N) or (N) OpenCV distortion parameters. We support N = 0, 1, 2, 4, 8. If N = 0, we return the input uv directly. If N = 1, we assume the input is {k1}. If N = 2, we assume the input is {k1, k2}. If N = 4, we assume the input is {k1, k2, p1, p2}. If N = 8, we assume the input is {k1, k2, p1, p2, k3, k4, k5, k6}. Returns: (..., 2) undistorted UV coordinates. |
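A hedged usage sketch; the call dispatches to the compiled CUDA kernel, so the tensors are placed on the GPU, and the distortion coefficients are made up for illustration:
uv = torch.rand(1024, 2, device="cuda") * 2 - 1  # normalized image-plane coordinates
params = torch.tensor([0.1, -0.05, 0.001, 0.001], device="cuda")  # k1, k2, p1, p2
uv_undistorted = opencv_lens_undistortion(uv, params)  # (1024, 2)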
189,556 | from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from . import cuda as _C
The provided code snippet includes necessary dependencies for implementing the `opencv_lens_undistortion_fisheye` function. Write a Python function `def opencv_lens_undistortion_fisheye( uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10 ) -> Tensor` to solve the following problem:
Undistort the opencv distortion of {k1, k2, k3, k4}. Note: This function is not differentiable to any inputs. Args: uv: (..., 2) UV coordinates. params: (..., 4) or (4) OpenCV distortion parameters. Returns: (..., 2) undistorted UV coordinates.
Here is the function:
def opencv_lens_undistortion_fisheye(
uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10
) -> Tensor:
"""Undistort the opencv distortion of {k1, k2, k3, k4}.
Note:
This function is not differentiable to any inputs.
Args:
uv: (..., 2) UV coordinates.
params: (..., 4) or (4) OpenCV distortion parameters.
Returns:
(..., 2) undistorted UV coordinates.
"""
assert uv.shape[-1] == 2
assert params.shape[-1] == 4
batch_shape = uv.shape[:-1]
params = torch.broadcast_to(params, batch_shape + (params.shape[-1],))
return _C.opencv_lens_undistortion_fisheye(
uv.contiguous(), params.contiguous(), eps, iters
) | Undistort the opencv distortion of {k1, k2, k3, k4}. Note: This function is not differentiable to any inputs. Args: uv: (..., 2) UV coordinates. params: (..., 4) or (4) OpenCV distortion parameters. Returns: (..., 2) undistorted UV coordinates. |
189,557 | from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from . import cuda as _C
The provided code snippet includes necessary dependencies for implementing the `_opencv_lens_distortion` function. Write a Python function `def _opencv_lens_distortion(uv: Tensor, params: Tensor) -> Tensor` to solve the following problem:
The opencv camera distortion of {k1, k2, p1, p2, k3, k4, k5, k6}. See https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html for more details.
Here is the function:
def _opencv_lens_distortion(uv: Tensor, params: Tensor) -> Tensor:
"""The opencv camera distortion of {k1, k2, p1, p2, k3, k4, k5, k6}.
See https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html for more details.
"""
k1, k2, p1, p2, k3, k4, k5, k6 = torch.unbind(params, dim=-1)
s1, s2, s3, s4 = 0, 0, 0, 0
u, v = torch.unbind(uv, dim=-1)
r2 = u * u + v * v
r4 = r2**2
r6 = r4 * r2
ratial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / (
1 + k4 * r2 + k5 * r4 + k6 * r6
)
fx = 2 * p1 * u * v + p2 * (r2 + 2 * u * u) + s1 * r2 + s2 * r4
fy = 2 * p2 * u * v + p1 * (r2 + 2 * v * v) + s3 * r2 + s4 * r4
return torch.stack([u * ratial + fx, v * ratial + fy], dim=-1) | The opencv camera distortion of {k1, k2, p1, p2, k3, k4, k5, k6}. See https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html for more details. |
189,558 | from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from . import cuda as _C
The provided code snippet includes necessary dependencies for implementing the `_opencv_lens_distortion_fisheye` function. Write a Python function `def _opencv_lens_distortion_fisheye( uv: Tensor, params: Tensor, eps: float = 1e-10 ) -> Tensor` to solve the following problem:
The opencv fisheye camera distortion of {k1, k2, k3, k4}. See https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html for more details. Args: uv: (..., 2) UV coordinates. params: (..., 4) or (4) OpenCV distortion parameters. Returns: (..., 2) distorted UV coordinates.
Here is the function:
def _opencv_lens_distortion_fisheye(
uv: Tensor, params: Tensor, eps: float = 1e-10
) -> Tensor:
"""The opencv camera distortion of {k1, k2, k3, p1, p2}.
See https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html for more details.
Args:
uv: (..., 2) UV coordinates.
params: (..., 4) or (4) OpenCV distortion parameters.
Returns:
(..., 2) distorted UV coordinates.
"""
assert params.shape[-1] == 4, f"Invalid params shape: {params.shape}"
k1, k2, k3, k4 = torch.unbind(params, dim=-1)
u, v = torch.unbind(uv, dim=-1)
r = torch.sqrt(u * u + v * v)
theta = torch.atan(r)
theta_d = theta * (
1
+ k1 * theta**2
+ k2 * theta**4
+ k3 * theta**6
+ k4 * theta**8
)
scale = theta_d / torch.clamp(r, min=eps)
return uv * scale[..., None] | The opencv camera distortion of {k1, k2, k3, p1, p2}. See https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html for more details. Args: uv: (..., 2) UV coordinates. params: (..., 4) or (4) OpenCV distortion parameters. Returns: (..., 2) distorted UV coordinates. |
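A hedged usage sketch for the fisheye forward model above; the coefficient values are illustrative assumptions:
params = torch.tensor([0.1, -0.02, 0.003, -0.0005])  # k1, k2, k3, k4
uv = torch.rand(256, 2) * 0.5 - 0.25  # normalized camera-plane coordinates
uv_distorted = _opencv_lens_distortion_fisheye(uv, params)  # (256, 2)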
189,559 | from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from . import cuda as _C
def _compute_residual_and_jacobian(
x: Tensor, y: Tensor, xd: Tensor, yd: Tensor, params: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
assert params.shape[-1] == 8
k1, k2, p1, p2, k3, k4, k5, k6 = torch.unbind(params, dim=-1)
# let r(x, y) = x^2 + y^2;
# alpha(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3;
# beta(x, y) = 1 + k4 * r(x, y) + k5 * r(x, y) ^2 + k6 * r(x, y)^3;
# d(x, y) = alpha(x, y) / beta(x, y);
r = x * x + y * y
alpha = 1.0 + r * (k1 + r * (k2 + r * k3))
beta = 1.0 + r * (k4 + r * (k5 + r * k6))
d = alpha / beta
# The perfect projection is:
# xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
# yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
#
# Let's define
#
# fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
# fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
#
# We are looking for a solution that satisfies
# fx(x, y) = fy(x, y) = 0;
fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
# Compute derivative of alpha, beta over r.
alpha_r = k1 + r * (2.0 * k2 + r * (3.0 * k3))
beta_r = k4 + r * (2.0 * k5 + r * (3.0 * k6))
# Compute derivative of d over [x, y]
d_r = (alpha_r * beta - alpha * beta_r) / (beta * beta)
d_x = 2.0 * x * d_r
d_y = 2.0 * y * d_r
# Compute derivative of fx over x and y.
fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
# Compute derivative of fy over x and y.
fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
return fx, fy, fx_x, fx_y, fy_x, fy_y
The provided code snippet includes necessary dependencies for implementing the `_opencv_lens_undistortion` function. Write a Python function `def _opencv_lens_undistortion( uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10 ) -> Tensor` to solve the following problem:
Same as opencv_lens_undistortion(), but in native PyTorch. Adapted, with a bug fix and modifications, from https://github.com/nerfstudio-project/nerfstudio/blob/ec603634edbd61b13bdf2c598fda8c993370b8f7/nerfstudio/cameras/camera_utils.py
Here is the function:
def _opencv_lens_undistortion(
uv: Tensor, params: Tensor, eps: float = 1e-6, iters: int = 10
) -> Tensor:
"""Same as opencv_lens_undistortion(), but native PyTorch.
Took from with bug fix and modification.
https://github.com/nerfstudio-project/nerfstudio/blob/ec603634edbd61b13bdf2c598fda8c993370b8f7/nerfstudio/cameras/camera_utils.py
"""
assert uv.shape[-1] == 2
assert params.shape[-1] in [0, 1, 2, 4, 8]
if params.shape[-1] == 0:
return uv
elif params.shape[-1] < 8:
params = F.pad(params, (0, 8 - params.shape[-1]), "constant", 0.0)
assert params.shape[-1] == 8
# Initialize from the distorted point.
x, y = x0, y0 = torch.unbind(uv, dim=-1)
zeros = torch.zeros_like(x)
for _ in range(iters):
fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
x=x, y=y, xd=x0, yd=y0, params=params
)
denominator = fy_x * fx_y - fx_x * fy_y
mask = torch.abs(denominator) > eps
x_numerator = fx * fy_y - fy * fx_y
y_numerator = fy * fx_x - fx * fy_x
step_x = torch.where(mask, x_numerator / denominator, zeros)
step_y = torch.where(mask, y_numerator / denominator, zeros)
x = x + step_x
y = y + step_y
return torch.stack([x, y], dim=-1) | Same as opencv_lens_undistortion(), but native PyTorch. Took from with bug fix and modification. https://github.com/nerfstudio-project/nerfstudio/blob/ec603634edbd61b13bdf2c598fda8c993370b8f7/nerfstudio/cameras/camera_utils.py |
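A small usage sketch for the PyTorch fallback above; it approximately inverts the forward model `_opencv_lens_distortion` shown earlier in this document, and the parameter values are illustrative:
params = torch.tensor([0.05, -0.01, 0.001, 0.001])  # k1, k2, p1, p2; the remaining coefficients are padded with zeros
uv_distorted = torch.rand(100, 2) * 0.5 - 0.25
uv = _opencv_lens_undistortion(uv_distorted, params, iters=10)  # (100, 2)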
189,560 | from typing import Optional, Tuple
import torch
from torch import Tensor
from . import cuda as _C
from .data_specs import RayIntervals, RaySamples
The provided code snippet includes necessary dependencies for implementing the `_ray_aabb_intersect` function. Write a Python function `def _ray_aabb_intersect( rays_o: Tensor, rays_d: Tensor, aabbs: Tensor, near_plane: float = -float("inf"), far_plane: float = float("inf"), miss_value: float = float("inf"), ) -> Tuple[Tensor, Tensor, Tensor]` to solve the following problem:
Ray-AABB intersection. Functionally the same as `ray_aabb_intersect()`, but slower since it uses pure PyTorch.
Here is the function:
def _ray_aabb_intersect(
rays_o: Tensor,
rays_d: Tensor,
aabbs: Tensor,
near_plane: float = -float("inf"),
far_plane: float = float("inf"),
miss_value: float = float("inf"),
) -> Tuple[Tensor, Tensor, Tensor]:
"""Ray-AABB intersection.
Functionally the same as `ray_aabb_intersect()`, but slower since it uses pure PyTorch.
"""
# Compute the minimum and maximum bounds of the AABBs
aabb_min = aabbs[:, :3]
aabb_max = aabbs[:, 3:]
# Compute the intersection distances between the ray and each of the six AABB planes
t1 = (aabb_min[None, :, :] - rays_o[:, None, :]) / rays_d[:, None, :]
t2 = (aabb_max[None, :, :] - rays_o[:, None, :]) / rays_d[:, None, :]
# Compute the maximum tmin and minimum tmax for each AABB
t_mins = torch.max(torch.min(t1, t2), dim=-1)[0]
t_maxs = torch.min(torch.max(t1, t2), dim=-1)[0]
# Compute whether each ray-AABB pair intersects
hits = (t_maxs > t_mins) & (t_maxs > 0)
# Clip the tmin and tmax values to the near and far planes
t_mins = torch.clamp(t_mins, min=near_plane, max=far_plane)
t_maxs = torch.clamp(t_maxs, min=near_plane, max=far_plane)
# Set the tmin and tmax values to miss_value if there is no intersection
t_mins = torch.where(hits, t_mins, miss_value)
t_maxs = torch.where(hits, t_maxs, miss_value)
return t_mins, t_maxs, hits | Ray-AABB intersection. Functionally the same with `ray_aabb_intersect()`, but slower with pure Pytorch. |
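A hedged usage sketch with a single unit AABB and random rays; all values are illustrative:
rays_o = torch.randn(128, 3)
rays_d = torch.randn(128, 3)
rays_d = rays_d / rays_d.norm(dim=-1, keepdim=True)
aabbs = torch.tensor([[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]])
t_mins, t_maxs, hits = _ray_aabb_intersect(rays_o, rays_d, aabbs)  # each (128, 1), one entry per ray-AABB pair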
189,561 | from typing import Optional, Tuple
import torch
from torch import Tensor
from . import cuda as _C
from .data_specs import RayIntervals, RaySamples
def _enlarge_aabb(aabb, factor: float) -> Tensor:
center = (aabb[:3] + aabb[3:]) / 2
extent = (aabb[3:] - aabb[:3]) / 2
return torch.cat([center - extent * factor, center + extent * factor]) | null |
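A worked example of `_enlarge_aabb` on a unit cube; the factor is illustrative:
aabb = torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0])
_enlarge_aabb(aabb, 2.0)  # -> tensor([-2., -2., -2., 2., 2., 2.])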
189,562 | from typing import Optional, Tuple
import torch
from torch import Tensor
from . import cuda as _C
from .data_specs import RayIntervals, RaySamples
The provided code snippet includes necessary dependencies for implementing the `_query` function. Write a Python function `def _query(x: Tensor, data: Tensor, base_aabb: Tensor) -> Tensor` to solve the following problem:
Query the grid values at the given points. This function assumes the aabbs of multiple grids are 2x scaled. Args: x: (N, 3) tensor of points to query. data: (m, resx, resy, resz) tensor of grid values base_aabb: (6,) aabb of base level grid.
Here is the function:
def _query(x: Tensor, data: Tensor, base_aabb: Tensor) -> Tensor:
"""
Query the grid values at the given points.
This function assumes the aabbs of multiple grids are 2x scaled.
Args:
x: (N, 3) tensor of points to query.
data: (m, resx, resy, resz) tensor of grid values
base_aabb: (6,) aabb of base level grid.
"""
# normalize so that the base_aabb is [0, 1]^3
aabb_min, aabb_max = torch.split(base_aabb, 3, dim=0)
x_norm = (x - aabb_min) / (aabb_max - aabb_min)
# if maxval is almost zero, it will trigger frexpf to output 0
# for exponent, which is not what we want.
maxval = (x_norm - 0.5).abs().max(dim=-1).values
maxval = torch.clamp(maxval, min=0.1)
# compute the mip level
exponent = torch.frexp(maxval)[1].long()
mip = torch.clamp(exponent + 1, min=0)
selector = mip < data.shape[0]
# use the mip to re-normalize all points to [0, 1].
scale = 2**mip
x_unit = (x_norm - 0.5) / scale[:, None] + 0.5
# map to the grid index
resolution = torch.tensor(data.shape[1:], device=x.device)
ix = (x_unit * resolution).long()
ix = torch.clamp(ix, max=resolution - 1)
mip = torch.clamp(mip, max=data.shape[0] - 1)
return data[mip, ix[:, 0], ix[:, 1], ix[:, 2]] * selector, selector | Query the grid values at the given points. This function assumes the aabbs of multiple grids are 2x scaled. Args: x: (N, 3) tensor of points to query. data: (m, resx, resy, resz) tensor of grid values base_aabb: (6,) aabb of base level grid. |
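A hedged usage sketch for `_query`; shapes follow the docstring and the values are assumptions:
data = torch.rand(4, 128, 128, 128)  # 4 mip levels of 2x-scaled occupancy grids
base_aabb = torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0])
x = torch.rand(1000, 3) * 4 - 2  # query points, some outside the base aabb
vals, selector = _query(x, data, base_aabb)  # (1000,) values and a (1000,) validity mask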
189,563 | from torch import Tensor
from .scan import exclusive_sum
from .volrend import accumulate_along_rays
def exclusive_sum(
inputs: Tensor,
packed_info: Optional[Tensor] = None,
indices: Optional[Tensor] = None,
) -> Tensor:
"""Exclusive Sum that supports flattened tensor.
Similar to :func:`nerfacc.inclusive_sum`, but computes the exclusive sum.
Args:
inputs: The tensor to be summed. Can be either a N-D tensor, or a flattened
tensor with either `packed_info` or `indices` specified.
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened input tensor, with in total n_rays chunks.
If None, the input is assumed to be a N-D tensor and the sum is computed
along the last dimension. Default is None.
indices: A flattened tensor with the same shape as `inputs`.
Returns:
The exclusive sum with the same shape as the input tensor.
Example:
.. code-block:: python
>>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda")
>>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda")
>>> exclusive_sum(inputs, packed_info)
tensor([ 0., 1., 0., 3., 7., 0., 6., 13., 21.], device='cuda:0')
"""
if indices is not None and packed_info is not None:
raise ValueError(
"Only one of `indices` and `packed_info` can be specified."
)
if indices is not None:
assert (
indices.dim() == 1 and indices.shape == inputs.shape
), "indices must be 1-D with the same shape as inputs."
if _C.is_cub_available():
# Use CUB if available
outputs = _ExclusiveSumCUB.apply(indices, inputs)
else:
warnings.warn(
"Passing in `indices` without CUB available is slow. Considering passing in `packed_info` instead."
)
packed_info = pack_info(ray_indices=indices)
if packed_info is not None:
assert inputs.dim() == 1, "inputs must be flattened."
assert (
packed_info.dim() == 2 and packed_info.shape[-1] == 2
), "packed_info must be 2-D with shape (B, 2)."
chunk_starts, chunk_cnts = packed_info.unbind(dim=-1)
outputs = _ExclusiveSum.apply(chunk_starts, chunk_cnts, inputs, False)
if indices is None and packed_info is None:
# Batched exclusive sum on the last dimension.
outputs = torch.cumsum(
torch.cat(
[torch.zeros_like(inputs[..., :1]), inputs[..., :-1]], dim=-1
),
dim=-1,
)
return outputs
def accumulate_along_rays(
weights: Tensor,
values: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
) -> Tensor:
"""Accumulate volumetric values along the ray.
This function supports both batched inputs and flattened inputs with
`ray_indices` and `n_rays` provided.
Note:
This function is differentiable to `weights` and `values`.
Args:
weights: Weights to be accumulated. If `ray_indices` not provided,
`weights` must be batched with shape (n_rays, n_samples). Else it
must be flattened with shape (all_samples,).
values: Values to be accumulated. If `ray_indices` not provided,
`values` must be batched with shape (n_rays, n_samples, D). Else it
must be flattened with shape (all_samples, D). None means
we accumulate weights along rays. Default: None.
ray_indices: Ray indices of the samples with shape (all_samples,).
If provided, `weights` must be a flattened tensor with shape (all_samples,)
and values (if not None) must be a flattened tensor with shape (all_samples, D).
Default: None.
n_rays: Number of rays. Should be provided together with `ray_indices`. Default: None.
Returns:
Accumulated values with shape (n_rays, D). If `values` is not given we return
the accumulated weights, in which case D == 1.
Examples:
.. code-block:: python
# Rendering: accumulate rgbs, opacities, and depths along the rays.
colors = accumulate_along_rays(weights, rgbs, ray_indices, n_rays)
opacities = accumulate_along_rays(weights, None, ray_indices, n_rays)
depths = accumulate_along_rays(
weights,
(t_starts + t_ends)[:, None] / 2.0,
ray_indices,
n_rays,
)
# (n_rays, 3), (n_rays, 1), (n_rays, 1)
print(colors.shape, opacities.shape, depths.shape)
"""
if values is None:
src = weights[..., None]
else:
assert values.dim() == weights.dim() + 1
assert weights.shape == values.shape[:-1]
src = weights[..., None] * values
if ray_indices is not None:
assert n_rays is not None, "n_rays must be provided"
assert weights.dim() == 1, "weights must be flattened"
outputs = torch.zeros(
(n_rays, src.shape[-1]), device=src.device, dtype=src.dtype
)
outputs.index_add_(0, ray_indices, src)
else:
outputs = torch.sum(src, dim=-2)
return outputs
The provided code snippet includes necessary dependencies for implementing the `distortion` function. Write a Python function `def distortion( weights: Tensor, t_starts: Tensor, t_ends: Tensor, ray_indices: Tensor, n_rays: int, ) -> Tensor` to solve the following problem:
Distortion Regularization proposed in Mip-NeRF 360. Args: weights: The flattened weights of the samples. Shape (n_samples,) t_starts: The start points of the samples. Shape (n_samples,) t_ends: The end points of the samples. Shape (n_samples,) ray_indices: The ray indices of the samples. LongTensor with shape (n_samples,) n_rays: The total number of rays. Returns: The per-ray distortion loss with the shape (n_rays, 1).
Here is the function:
def distortion(
weights: Tensor,
t_starts: Tensor,
t_ends: Tensor,
ray_indices: Tensor,
n_rays: int,
) -> Tensor:
"""Distortion Regularization proposed in Mip-NeRF 360.
Args:
weights: The flattened weights of the samples. Shape (n_samples,)
t_starts: The start points of the samples. Shape (n_samples,)
t_ends: The end points of the samples. Shape (n_samples,)
ray_indices: The ray indices of the samples. LongTensor with shape (n_samples,)
n_rays: The total number of rays.
Returns:
The per-ray distortion loss with the shape (n_rays, 1).
"""
assert (
weights.shape == t_starts.shape == t_ends.shape == ray_indices.shape
), (
f"the shape of the inputs are not the same: "
f"weights {weights.shape}, t_starts {t_starts.shape}, "
f"t_ends {t_ends.shape}, ray_indices {ray_indices.shape}"
)
t_mids = 0.5 * (t_starts + t_ends)
t_deltas = t_ends - t_starts
loss_uni = (1 / 3) * (t_deltas * weights.pow(2))
loss_bi_0 = weights * t_mids * exclusive_sum(weights, indices=ray_indices)
loss_bi_1 = weights * exclusive_sum(weights * t_mids, indices=ray_indices)
loss_bi = 2 * (loss_bi_0 - loss_bi_1)
loss = loss_uni + loss_bi
loss = accumulate_along_rays(loss, None, ray_indices, n_rays)
return loss | Distortion Regularization proposed in Mip-NeRF 360. Args: weights: The flattened weights of the samples. Shape (n_samples,) t_starts: The start points of the samples. Shape (n_samples,) t_ends: The end points of the samples. Shape (n_samples,) ray_indices: The ray indices of the samples. LongTensor with shape (n_samples,) n_rays: The total number of rays. Returns: The per-ray distortion loss with the shape (n_rays, 1). |
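For illustration, a toy call with two rays of flattened samples (assumes a CUDA device and a nerfacc build, since `exclusive_sum` with `indices` relies on the CUDA backend):
import torch
weights = torch.tensor([0.4, 0.3, 0.2, 0.5, 0.1], device="cuda")
t_starts = torch.tensor([0.0, 1.0, 2.0, 0.0, 1.0], device="cuda")
t_ends = torch.tensor([1.0, 2.0, 3.0, 1.0, 2.0], device="cuda")
ray_indices = torch.tensor([0, 0, 0, 1, 1], device="cuda")
loss = distortion(weights, t_starts, t_ends, ray_indices, n_rays=2)  # per-ray loss, shape (2, 1)
loss = loss.mean()  # typically reduced and added to the training loss with a small weight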
189,564 | from typing import Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor
from ..grid import _enlarge_aabb, traverse_grids
from ..volrend import (
render_visibility_from_alpha,
render_visibility_from_density,
)
from .base import AbstractEstimator
The provided code snippet includes necessary dependencies for implementing the `_meshgrid3d` function. Write a Python function `def _meshgrid3d( res: Tensor, device: Union[torch.device, str] = "cpu" ) -> Tensor` to solve the following problem:
Create 3D grid coordinates.
Here is the function:
def _meshgrid3d(
res: Tensor, device: Union[torch.device, str] = "cpu"
) -> Tensor:
"""Create 3D grid coordinates."""
assert len(res) == 3
res = res.tolist()
return torch.stack(
torch.meshgrid(
[
torch.arange(res[0], dtype=torch.long),
torch.arange(res[1], dtype=torch.long),
torch.arange(res[2], dtype=torch.long),
],
indexing="ij",
),
dim=-1,
).to(device) | Create 3D grid coordinates. |
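For illustration (assuming `torch` is imported), a call with a tiny resolution:
res = torch.tensor([2, 2, 2])
coords = _meshgrid3d(res)        # (2, 2, 2, 3) integer xyz coordinates of every cell
coords = coords.reshape(-1, 3)   # flattened to (8, 3), as done when building the occupancy grid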
189,565 | from typing import Callable, List, Optional, Tuple
import torch
from torch import Tensor
from ..data_specs import RayIntervals
from ..pdf import importance_sampling, searchsorted
from ..volrend import render_transmittance_from_density
from .base import AbstractEstimator
def get_proposal_requires_grad_fn(
target: float = 5.0, num_steps: int = 1000
) -> Callable:
schedule = lambda s: min(s / num_steps, 1.0) * target
steps_since_last_grad = 0
def proposal_requires_grad_fn(step: int) -> bool:
nonlocal steps_since_last_grad
target_steps_since_last_grad = schedule(step)
requires_grad = steps_since_last_grad > target_steps_since_last_grad
if requires_grad:
steps_since_last_grad = 0
steps_since_last_grad += 1
return requires_grad
return proposal_requires_grad_fn | null |
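A sketch of how this scheduler is typically consumed in a training loop; `max_steps` and the estimator call in the comment are assumptions for illustration:
proposal_requires_grad_fn = get_proposal_requires_grad_fn(target=5.0, num_steps=1000)
max_steps = 20000  # assumed total number of training iterations
for step in range(max_steps):
    proposal_requires_grad = proposal_requires_grad_fn(step)
    # e.g. forwarded to PropNetEstimator.sampling(..., requires_grad=proposal_requires_grad)
    # so that proposal-network gradients are only computed every few iterations.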
189,566 | from typing import Callable, List, Optional, Tuple
import torch
from torch import Tensor
from ..data_specs import RayIntervals
from ..pdf import importance_sampling, searchsorted
from ..volrend import render_transmittance_from_density
from .base import AbstractEstimator
def _transform_stot(
transform_type: Literal["uniform", "lindisp"],
s_vals: torch.Tensor,
t_min: torch.Tensor,
t_max: torch.Tensor,
) -> torch.Tensor:
if transform_type == "uniform":
_contract_fn, _icontract_fn = lambda x: x, lambda x: x
elif transform_type == "lindisp":
_contract_fn, _icontract_fn = lambda x: 1 / x, lambda x: 1 / x
else:
raise ValueError(f"Unknown transform_type: {transform_type}")
s_min, s_max = _contract_fn(t_min), _contract_fn(t_max)
icontract_fn = lambda s: _icontract_fn(s * s_max + (1 - s) * s_min)
return icontract_fn(s_vals) | null |
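A tiny worked example with scalar near/far planes (as used by the proposal-network sampler); purely illustrative:
import torch
s_vals = torch.linspace(0.0, 1.0, steps=5)  # normalized samples in [0, 1]
t_vals = _transform_stot("lindisp", s_vals, t_min=0.1, t_max=100.0)
# s=0 maps to t_min and s=1 maps to t_max; interior samples are spaced linearly in disparity (1/t).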
189,567 | from typing import Callable, List, Optional, Tuple
import torch
from torch import Tensor
from ..data_specs import RayIntervals
from ..pdf import importance_sampling, searchsorted
from ..volrend import render_transmittance_from_density
from .base import AbstractEstimator
class RayIntervals:
"""Ray intervals that supports batched and flattened data.
Each interval is defined by two edges (left and right). The attribute `vals`
stores the edges of all intervals along the rays. The attributes `is_left`
and `is_right` are for indicating whether each edge is a left or right edge.
This class unifies the representation of both continuous and non-continuous ray
intervals.
Note:
When `vals` is flattened, either `packed_info` or `ray_indices` must
be provided. Also both `is_left` and `is_right` must be provided.
Args:
vals: Batched data with shape (n_rays, n_edges) or flattened data
with shape (all_edges,)
packed_info: Optional. A tensor of shape (n_rays, 2) that specifies
the start and count of each chunk in flattened `vals`, with in
total n_rays chunks. Only needed when `vals` is flattened.
ray_indices: Optional. A tensor of shape (all_edges,) that specifies
the ray index of each edge. Only needed when `vals` is flattened.
is_left: Optional. A boolean tensor of shape (all_edges,) that specifies
whether each edge is a left edge. Only needed when `vals` is flattened.
is_right: Optional. A boolean tensor of shape (all_edges,) that specifies
whether each edge is a right edge. Only needed when `vals` is flattened.
Examples:
.. code-block:: python
>>> # Batched data
>>> ray_intervals = RayIntervals(torch.rand(10, 100))
>>> # Flattened data
>>> ray_intervals = RayIntervals(
>>> torch.rand(6),
>>> packed_info=torch.tensor([[0, 2], [2, 0], [2, 4]]),
>>> is_left=torch.tensor([True, False, True, True, True, False]),
>>> is_right=torch.tensor([False, True, False, True, True, True]),
>>> )
"""
vals: torch.Tensor
packed_info: Optional[torch.Tensor] = None
ray_indices: Optional[torch.Tensor] = None
is_left: Optional[torch.Tensor] = None
is_right: Optional[torch.Tensor] = None
def _to_cpp(self):
"""
Generate object to pass to C++
"""
spec = _C.RaySegmentsSpec()
spec.vals = self.vals.contiguous()
if self.packed_info is not None:
spec.chunk_starts = self.packed_info[:, 0].contiguous()
if self.packed_info is not None:
spec.chunk_cnts = self.packed_info[:, 1].contiguous()
if self.ray_indices is not None:
spec.ray_indices = self.ray_indices.contiguous()
if self.is_left is not None:
spec.is_left = self.is_left.contiguous()
if self.is_right is not None:
spec.is_right = self.is_right.contiguous()
return spec
def _from_cpp(cls, spec):
"""
Generate object from C++
"""
if spec.chunk_starts is not None and spec.chunk_cnts is not None:
packed_info = torch.stack([spec.chunk_starts, spec.chunk_cnts], -1)
else:
packed_info = None
ray_indices = spec.ray_indices
is_left = spec.is_left
is_right = spec.is_right
return cls(
vals=spec.vals,
packed_info=packed_info,
ray_indices=ray_indices,
is_left=is_left,
is_right=is_right,
)
def device(self) -> torch.device:
return self.vals.device
def searchsorted(
sorted_sequence: Union[RayIntervals, RaySamples],
values: Union[RayIntervals, RaySamples],
) -> Tuple[Tensor, Tensor]:
"""Searchsorted that supports flattened tensor.
This function returns {`ids_left`, `ids_right`} such that:
`sorted_sequence.vals.gather(-1, ids_left) <= values.vals < sorted_sequence.vals.gather(-1, ids_right)`
Note:
When values is out of range of sorted_sequence, we return the
corresponding ids as if the values is clipped to the range of
sorted_sequence. See the example below.
Args:
sorted_sequence: A :class:`RayIntervals` or :class:`RaySamples` object. We assume
the `sorted_sequence.vals` is sorted in ascending order for each ray.
values: A :class:`RayIntervals` or :class:`RaySamples` object.
Returns:
A tuple of LongTensor:
- **ids_left**: A LongTensor with the same shape as `values.vals`.
- **ids_right**: A LongTensor with the same shape as `values.vals`.
Example:
>>> sorted_sequence = RayIntervals(
... vals=torch.tensor([0.0, 1.0, 0.0, 1.0, 2.0], device="cuda"),
... packed_info=torch.tensor([[0, 2], [2, 3]], device="cuda"),
... )
>>> values = RayIntervals(
... vals=torch.tensor([0.5, 1.5, 2.5], device="cuda"),
... packed_info=torch.tensor([[0, 1], [1, 2]], device="cuda"),
... )
>>> ids_left, ids_right = searchsorted(sorted_sequence, values)
>>> ids_left
tensor([0, 3, 3], device='cuda:0')
>>> ids_right
tensor([1, 4, 4], device='cuda:0')
>>> sorted_sequence.vals.gather(-1, ids_left)
tensor([0., 1., 1.], device='cuda:0')
>>> sorted_sequence.vals.gather(-1, ids_right)
tensor([1., 2., 2.], device='cuda:0')
"""
ids_left, ids_right = _C.searchsorted(
values._to_cpp(), sorted_sequence._to_cpp()
)
return ids_left, ids_right
def _pdf_loss(
segments_query: RayIntervals,
cdfs_query: torch.Tensor,
segments_key: RayIntervals,
cdfs_key: torch.Tensor,
eps: float = 1e-7,
) -> torch.Tensor:
ids_left, ids_right = searchsorted(segments_key, segments_query)
if segments_query.vals.dim() > 1:
w = cdfs_query[..., 1:] - cdfs_query[..., :-1]
ids_left = ids_left[..., :-1]
ids_right = ids_right[..., 1:]
else:
# TODO: not tested for this branch.
assert segments_query.is_left is not None
assert segments_query.is_right is not None
w = (
cdfs_query[segments_query.is_right]
- cdfs_query[segments_query.is_left]
)
ids_left = ids_left[segments_query.is_left]
ids_right = ids_right[segments_query.is_right]
w_outer = cdfs_key.gather(-1, ids_right) - cdfs_key.gather(-1, ids_left)
return torch.clip(w - w_outer, min=0) ** 2 / (w + eps) | null |
189,568 | from typing import Callable, List, Optional, Tuple
import torch
from torch import Tensor
from ..data_specs import RayIntervals
from ..pdf import importance_sampling, searchsorted
from ..volrend import render_transmittance_from_density
from .base import AbstractEstimator
def _outer(
t0_starts: torch.Tensor,
t0_ends: torch.Tensor,
t1_starts: torch.Tensor,
t1_ends: torch.Tensor,
y1: torch.Tensor,
) -> torch.Tensor:
"""
Args:
t0_starts: (..., S0).
t0_ends: (..., S0).
t1_starts: (..., S1).
t1_ends: (..., S1).
y1: (..., S1).
"""
cy1 = torch.cat(
[torch.zeros_like(y1[..., :1]), torch.cumsum(y1, dim=-1)], dim=-1
)
idx_lo = (
torch.searchsorted(
t1_starts.contiguous(), t0_starts.contiguous(), side="right"
)
- 1
)
idx_lo = torch.clamp(idx_lo, min=0, max=y1.shape[-1] - 1)
idx_hi = torch.searchsorted(
t1_ends.contiguous(), t0_ends.contiguous(), side="right"
)
idx_hi = torch.clamp(idx_hi, min=0, max=y1.shape[-1] - 1)
cy1_lo = torch.take_along_dim(cy1[..., :-1], idx_lo, dim=-1)
cy1_hi = torch.take_along_dim(cy1[..., 1:], idx_hi, dim=-1)
y0_outer = cy1_hi - cy1_lo
return y0_outer
The provided code snippet includes necessary dependencies for implementing the `_lossfun_outer` function. Write a Python function `def _lossfun_outer( t: torch.Tensor, w: torch.Tensor, t_env: torch.Tensor, w_env: torch.Tensor, )` to solve the following problem:
Args: t: interval edges, (..., S + 1). w: weights, (..., S). t_env: interval edges of the upper bound enveloping histogram, (..., S + 1). w_env: weights that should upper bound the inner (t,w) histogram, (..., S).
Here is the function:
def _lossfun_outer(
t: torch.Tensor,
w: torch.Tensor,
t_env: torch.Tensor,
w_env: torch.Tensor,
):
"""
Args:
t: interval edges, (..., S + 1).
w: weights, (..., S).
t_env: interval edges of the upper bound enveloping histogram, (..., S + 1).
w_env: weights that should upper bound the inner (t,w) histogram, (..., S).
"""
eps = torch.finfo(t.dtype).eps
w_outer = _outer(
t[..., :-1], t[..., 1:], t_env[..., :-1], t_env[..., 1:], w_env
)
return torch.clip(w - w_outer, min=0) ** 2 / (w + eps) | Args: t: interval edges, (..., S + 1). w: weights, (..., S). t_env: interval edges of the upper bound enveloping historgram, (..., S + 1). w_env: weights that should upper bound the inner (t,w) histogram, (..., S). |
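An illustrative batched call with toy histograms (pure torch; the numbers are arbitrary and only the shapes matter):
import torch
t = torch.linspace(0.0, 1.0, steps=9).expand(2, 9)      # fine interval edges, (n_rays, S + 1)
w = torch.full((2, 8), 1.0 / 8)                         # fine weights, (n_rays, S)
t_env = torch.linspace(0.0, 1.0, steps=5).expand(2, 5)  # coarse proposal edges, (n_rays, S' + 1)
w_env = torch.full((2, 4), 1.0 / 4)                     # coarse weights, (n_rays, S')
loss = _lossfun_outer(t, w, t_env, w_env)               # per-interval penalty, (n_rays, 8)
loss = loss.mean()                                      # reduced before being added to the training loss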
189,569 | from typing import Any, Callable, List, Mapping, Optional, Tuple, Union
fVDB_ENABLED = True
import torch
from torch import Tensor
from nerfacc.estimators.base import AbstractEstimator
from nerfacc.volrend import (
render_visibility_from_alpha,
render_visibility_from_density,
)
The provided code snippet includes necessary dependencies for implementing the `traverse_vdbs` function. Write a Python function `def traverse_vdbs( # rays rays_o: Tensor, # [n_rays, 3] rays_d: Tensor, # [n_rays, 3] # grids grids: GridBatch, # options near_planes: Optional[Tensor] = None, # [n_rays] far_planes: Optional[Tensor] = None, # [n_rays] step_size: Optional[float] = 1e-3, cone_angle: Optional[float] = 0.0, )` to solve the following problem:
Traverse the fVDB grids.
Here is the function:
def traverse_vdbs(
# rays
rays_o: Tensor, # [n_rays, 3]
rays_d: Tensor, # [n_rays, 3]
# grids
grids: GridBatch,
# options
near_planes: Optional[Tensor] = None, # [n_rays]
far_planes: Optional[Tensor] = None, # [n_rays]
step_size: Optional[float] = 1e-3,
cone_angle: Optional[float] = 0.0,
):
"""Traverse the fVDB grids."""
assert fVDB_ENABLED, "Please install fVDB to use this function."
assert len(grids) == 1, "Only support one grid for now."
if near_planes is None:
near_planes = torch.zeros_like(rays_o[:, 0])
if far_planes is None:
far_planes = torch.full_like(rays_o[:, 0], float("inf"))
_, indices, intervals = grids.uniform_ray_samples(
rays_o,
rays_d,
near_planes,
far_planes,
step_size,
cone_angle,
# Use the midpoint of the sample intervals to determine occupancy.
include_end_segments=False,
)
t_starts, t_ends = torch.unbind(intervals.jdata, dim=-1)
ray_indices = indices.jdata.long()
# TODO(ruilongli): In fvdb, we would like to restrain the endpoints of the sample
# intervals to be within the grid boundaries.
return t_starts, t_ends, ray_indices | Traverse the fVDB grids. |
189,570 | import math
from typing import Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor
from ..grid import _enlarge_aabb
from ..volrend import (
render_visibility_from_alpha,
render_visibility_from_density,
)
from .base import AbstractEstimator
def occ_eval_fn(x):
return torch.rand(len(x), 1) | null |
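This dummy evaluation function has the signature expected by the occupancy-grid updater. A hedged usage sketch, assuming `OccGridEstimator` from the same module; the constructor arguments are illustrative, and a real pipeline would query the radiance field's density instead of random values:
estimator = OccGridEstimator(roi_aabb=[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], resolution=128, levels=1)
for step in range(1000):
    estimator.update_every_n_steps(step=step, occ_eval_fn=occ_eval_fn)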
189,571 | from typing import Callable, Dict, Optional, Tuple
import torch
from torch import Tensor
from .cuda import is_cub_available
from .pack import pack_info
from .scan import exclusive_prod, exclusive_sum
def render_transmittance_from_alpha(
alphas: Tensor,
packed_info: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
prefix_trans: Optional[Tensor] = None,
) -> Tensor:
"""Compute transmittance :math:`T_i` from alpha :math:`\\alpha_i`.
.. math::
T_i = \\prod_{j=1}^{i-1}(1-\\alpha_j)
This function supports both batched and flattened input tensor. For flattened input tensor, either
(`packed_info`) or (`ray_indices` and `n_rays`) should be provided.
Args:
alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened samples, with in total n_rays chunks.
Useful for flattened input.
ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).
n_rays: Number of rays. Only useful when `ray_indices` is provided.
prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).
Returns:
The rendering transmittance with the same shape as `alphas`.
Examples:
.. code-block:: python
>>> alphas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
>>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")
>>> transmittance = render_transmittance_from_alpha(alphas, ray_indices=ray_indices)
tensor([1.0, 0.6, 0.12, 1.0, 0.2, 1.0, 1.0])
"""
# FIXME Try not to use exclusive_prod because:
# 1. torch.cumprod is much slower than torch.cumsum
# 2. exclusive_prod gradient on input == 0 is not correct.
if not is_cub_available() and packed_info is None:
# Convert ray indices to packed info
packed_info = pack_info(ray_indices, n_rays)
ray_indices = None
trans = exclusive_prod(
1 - alphas, packed_info=packed_info, indices=ray_indices
)
if prefix_trans is not None:
trans *= prefix_trans
return trans
The provided code snippet includes necessary dependencies for implementing the `render_visibility_from_alpha` function. Write a Python function `def render_visibility_from_alpha( alphas: Tensor, packed_info: Optional[Tensor] = None, ray_indices: Optional[Tensor] = None, n_rays: Optional[int] = None, early_stop_eps: float = 1e-4, alpha_thre: float = 0.0, prefix_trans: Optional[Tensor] = None, ) -> Tensor` to solve the following problem:
Compute visibility from opacity :math:`\\alpha_i`. In this function, we first compute the transmittance from the sample opacity. The transmittance is then used to filter out occluded samples. And opacity is used to filter out transparent samples. The function returns a boolean tensor indicating which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`). This function supports both batched and flattened input tensor. For flattened input tensor, either (`packed_info`) or (`ray_indices` and `n_rays`) should be provided. Args: alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples). packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened samples, with in total n_rays chunks. Useful for flattened input. ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples). n_rays: Number of rays. Only useful when `ray_indices` is provided. early_stop_eps: The early stopping threshold on transmittance. alpha_thre: The threshold on opacity. prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,). Returns: A boolean tensor indicating which samples are visible. Same shape as `alphas`. Examples: .. code-block:: python >>> alphas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda") >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda") >>> transmittance = render_transmittance_from_alpha(alphas, ray_indices=ray_indices) tensor([1.0, 0.6, 0.12, 1.0, 0.2, 1.0, 1.0]) >>> visibility = render_visibility_from_alpha( >>> alphas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2) tensor([True, True, False, True, False, False, True])
Here is the function:
def render_visibility_from_alpha(
alphas: Tensor,
packed_info: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0,
prefix_trans: Optional[Tensor] = None,
) -> Tensor:
"""Compute visibility from opacity :math:`\\alpha_i`.
In this function, we first compute the transmittance from the sample opacity. The
transmittance is then used to filter out occluded samples. And opacity is used to
filter out transparent samples. The function returns a boolean tensor indicating
which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`).
This function supports both batched and flattened input tensor. For flattened input tensor, either
(`packed_info`) or (`ray_indices` and `n_rays`) should be provided.
Args:
alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened samples, with in total n_rays chunks.
Useful for flattened input.
ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).
n_rays: Number of rays. Only useful when `ray_indices` is provided.
early_stop_eps: The early stopping threshold on transmittance.
alpha_thre: The threshold on opacity.
prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).
Returns:
A boolean tensor indicating which samples are visible. Same shape as `alphas`.
Examples:
.. code-block:: python
>>> alphas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
>>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")
>>> transmittance = render_transmittance_from_alpha(alphas, ray_indices=ray_indices)
tensor([1.0, 0.6, 0.12, 1.0, 0.2, 1.0, 1.0])
>>> visibility = render_visibility_from_alpha(
>>> alphas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2)
tensor([True, True, False, True, False, False, True])
"""
trans = render_transmittance_from_alpha(
alphas, packed_info, ray_indices, n_rays, prefix_trans
)
vis = trans >= early_stop_eps
if alpha_thre > 0:
vis = vis & (alphas >= alpha_thre)
return vis | Compute visibility from opacity :math:`\\alpha_i`. In this function, we first compute the transmittance from the sample opacity. The transmittance is then used to filter out occluded samples. And opacity is used to filter out transparent samples. The function returns a boolean tensor indicating which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`). This function supports both batched and flattened input tensor. For flattened input tensor, either (`packed_info`) or (`ray_indices` and `n_rays`) should be provided. Args: alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples). packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened samples, with in total n_rays chunks. Useful for flattened input. ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples). n_rays: Number of rays. Only useful when `ray_indices` is provided. early_stop_eps: The early stopping threshold on transmittance. alpha_thre: The threshold on opacity. prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,). Returns: A boolean tensor indicating which samples are visible. Same shape as `alphas`. Examples: .. code-block:: python >>> alphas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda") >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda") >>> transmittance = render_transmittance_from_alpha(alphas, ray_indices=ray_indices) tensor([1.0, 0.6, 0.12, 1.0, 0.2, 1.0, 1.0]) >>> visibility = render_visibility_from_alpha( >>> alphas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2) tensor([True, True, False, True, False, False, True]) |
189,572 | from typing import Callable, Dict, Optional, Tuple
import torch
from torch import Tensor
from .cuda import is_cub_available
from .pack import pack_info
from .scan import exclusive_prod, exclusive_sum
def render_transmittance_from_density(
t_starts: Tensor,
t_ends: Tensor,
sigmas: Tensor,
packed_info: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
prefix_trans: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
"""Compute transmittance :math:`T_i` from density :math:`\\sigma_i`.
.. math::
T_i = exp(-\\sum_{j=1}^{i-1}\\sigma_j\\delta_j)
This function supports both batched and flattened input tensor. For flattened input tensor, either
(`packed_info`) or (`ray_indices` and `n_rays`) should be provided.
Args:
t_starts: Where the frustum-shape sample starts along a ray. Tensor with \
shape (all_samples,) or (n_rays, n_samples).
t_ends: Where the frustum-shape sample ends along a ray. Tensor with \
shape (all_samples,) or (n_rays, n_samples).
sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened samples, with in total n_rays chunks.
Useful for flattened input.
ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).
n_rays: Number of rays. Only useful when `ray_indices` is provided.
prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).
Returns:
The rendering transmittance and opacities, both with the same shape as `sigmas`.
Examples:
.. code-block:: python
>>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda")
>>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda")
>>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
>>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")
>>> transmittance, alphas = render_transmittance_from_density(
>>> t_starts, t_ends, sigmas, ray_indices=ray_indices)
transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]
alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]
"""
if not is_cub_available() and packed_info is None:
# Convert ray indices to packed info
packed_info = pack_info(ray_indices, n_rays)
ray_indices = None
sigmas_dt = sigmas * (t_ends - t_starts)
alphas = 1.0 - torch.exp(-sigmas_dt)
trans = torch.exp(
-exclusive_sum(sigmas_dt, packed_info=packed_info, indices=ray_indices)
)
if prefix_trans is not None:
trans = trans * prefix_trans
return trans, alphas
The provided code snippet includes necessary dependencies for implementing the `render_visibility_from_density` function. Write a Python function `def render_visibility_from_density( t_starts: Tensor, t_ends: Tensor, sigmas: Tensor, packed_info: Optional[Tensor] = None, ray_indices: Optional[Tensor] = None, n_rays: Optional[int] = None, early_stop_eps: float = 1e-4, alpha_thre: float = 0.0, prefix_trans: Optional[Tensor] = None, ) -> Tensor` to solve the following problem:
Compute visibility from density :math:`\\sigma_i` and interval :math:`\\delta_i`. In this function, we first compute the transmittance and opacity from the sample density. The transmittance is then used to filter out occluded samples. And opacity is used to filter out transparent samples. The function returns a boolean tensor indicating which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`). This function supports both batched and flattened input tensor. For flattened input tensor, either (`packed_info`) or (`ray_indices` and `n_rays`) should be provided. Args: alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples). packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened samples, with in total n_rays chunks. Useful for flattened input. ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples). n_rays: Number of rays. Only useful when `ray_indices` is provided. early_stop_eps: The early stopping threshold on transmittance. alpha_thre: The threshold on opacity. prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,). Returns: A boolean tensor indicating which samples are visible. Same shape as `alphas`. Examples: .. code-block:: python >>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda") >>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda") >>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda") >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda") >>> transmittance, alphas = render_transmittance_from_density( >>> t_starts, t_ends, sigmas, ray_indices=ray_indices) transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00] alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59] >>> visibility = render_visibility_from_density( >>> t_starts, t_ends, sigmas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2) tensor([True, True, False, True, False, False, True])
Here is the function:
def render_visibility_from_density(
t_starts: Tensor,
t_ends: Tensor,
sigmas: Tensor,
packed_info: Optional[Tensor] = None,
ray_indices: Optional[Tensor] = None,
n_rays: Optional[int] = None,
early_stop_eps: float = 1e-4,
alpha_thre: float = 0.0,
prefix_trans: Optional[Tensor] = None,
) -> Tensor:
"""Compute visibility from density :math:`\\sigma_i` and interval :math:`\\delta_i`.
In this function, we first compute the transmittance and opacity from the sample density. The
transmittance is then used to filter out occluded samples. And opacity is used to
filter out transparent samples. The function returns a boolean tensor indicating
which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`).
This function supports both batched and flattened input tensor. For flattened input tensor, either
(`packed_info`) or (`ray_indices` and `n_rays`) should be provided.
Args:
t_starts: Where the frustum-shape sample starts along a ray. Tensor with shape (all_samples,) or (n_rays, n_samples).
t_ends: Where the frustum-shape sample ends along a ray. Tensor with shape (all_samples,) or (n_rays, n_samples).
sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened samples, with in total n_rays chunks.
Useful for flattened input.
ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).
n_rays: Number of rays. Only useful when `ray_indices` is provided.
early_stop_eps: The early stopping threshold on transmittance.
alpha_thre: The threshold on opacity.
prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).
Returns:
A boolean tensor indicating which samples are visible. Same shape as `alphas`.
Examples:
.. code-block:: python
>>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda")
>>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda")
>>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda")
>>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda")
>>> transmittance, alphas = render_transmittance_from_density(
>>> t_starts, t_ends, sigmas, ray_indices=ray_indices)
transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]
alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]
>>> visibility = render_visibility_from_density(
>>> t_starts, t_ends, sigmas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2)
tensor([True, True, False, True, False, False, True])
"""
trans, alphas = render_transmittance_from_density(
t_starts, t_ends, sigmas, packed_info, ray_indices, n_rays, prefix_trans
)
vis = trans >= early_stop_eps
if alpha_thre > 0:
vis = vis & (alphas >= alpha_thre)
return vis | Compute visibility from density :math:`\\sigma_i` and interval :math:`\\delta_i`. In this function, we first compute the transmittance and opacity from the sample density. The transmittance is then used to filter out occluded samples. And opacity is used to filter out transparent samples. The function returns a boolean tensor indicating which samples are visible (`transmittance > early_stop_eps` and `opacity > alpha_thre`). This function supports both batched and flattened input tensor. For flattened input tensor, either (`packed_info`) or (`ray_indices` and `n_rays`) should be provided. Args: alphas: The opacity values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples). packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened samples, with in total n_rays chunks. Useful for flattened input. ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples). n_rays: Number of rays. Only useful when `ray_indices` is provided. early_stop_eps: The early stopping threshold on transmittance. alpha_thre: The threshold on opacity. prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,). Returns: A boolean tensor indicating which samples are visible. Same shape as `alphas`. Examples: .. code-block:: python >>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device="cuda") >>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device="cuda") >>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device="cuda") >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device="cuda") >>> transmittance, alphas = render_transmittance_from_density( >>> t_starts, t_ends, sigmas, ray_indices=ray_indices) transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00] alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59] >>> visibility = render_visibility_from_density( >>> t_starts, t_ends, sigmas, ray_indices=ray_indices, early_stop_eps=0.3, alpha_thre=0.2) tensor([True, True, False, True, False, False, True]) |
189,573 | import warnings
from typing import Optional
import torch
from torch import Tensor
from . import cuda as _C
from .pack import pack_info
class _InclusiveSum(torch.autograd.Function):
"""Inclusive Sum on a Flattened Tensor."""
def forward(ctx, chunk_starts, chunk_cnts, inputs, normalize: bool = False):
chunk_starts = chunk_starts.contiguous()
chunk_cnts = chunk_cnts.contiguous()
inputs = inputs.contiguous()
outputs = _C.inclusive_sum(
chunk_starts, chunk_cnts, inputs, normalize, False
)
if ctx.needs_input_grad[2]:
ctx.normalize = normalize
ctx.save_for_backward(chunk_starts, chunk_cnts)
return outputs
def backward(ctx, grad_outputs):
grad_outputs = grad_outputs.contiguous()
chunk_starts, chunk_cnts = ctx.saved_tensors
normalize = ctx.normalize
assert normalize == False, "Only support backward for normalize==False."
grad_inputs = _C.inclusive_sum(
chunk_starts, chunk_cnts, grad_outputs, normalize, True
)
return None, None, grad_inputs, None
class _InclusiveSumCUB(torch.autograd.Function):
"""Inclusive Sum on a Flattened Tensor with CUB."""
def forward(ctx, indices, inputs):
indices = indices.contiguous()
inputs = inputs.contiguous()
outputs = _C.inclusive_sum_cub(indices, inputs, False)
if ctx.needs_input_grad[1]:
ctx.save_for_backward(indices)
return outputs
def backward(ctx, grad_outputs):
grad_outputs = grad_outputs.contiguous()
(indices,) = ctx.saved_tensors
grad_inputs = _C.inclusive_sum_cub(indices, grad_outputs, True)
return None, grad_inputs
def pack_info(ray_indices: Tensor, n_rays: Optional[int] = None) -> Tensor:
"""Pack `ray_indices` to `packed_info`. Useful for converting per sample data to per ray data.
Note:
this function is not differentiable to any inputs.
Args:
ray_indices: Ray indices of the samples. LongTensor with shape (n_sample).
n_rays: Number of rays. If None, it is inferred from `ray_indices`. Default is None.
Returns:
A LongTensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened input tensor, with in total n_rays chunks.
Example:
.. code-block:: python
>>> ray_indices = torch.tensor([0, 0, 1, 1, 1, 2, 2, 2, 2], device="cuda")
>>> packed_info = pack_info(ray_indices, n_rays=3)
>>> packed_info
tensor([[0, 2], [2, 3], [5, 4]], device='cuda:0')
"""
assert (
ray_indices.dim() == 1
), "ray_indices must be a 1D tensor with shape (n_samples)."
if ray_indices.is_cuda:
device = ray_indices.device
dtype = ray_indices.dtype
if n_rays is None:
n_rays = ray_indices.max().item() + 1
chunk_cnts = torch.zeros((n_rays,), device=device, dtype=dtype)
chunk_cnts.index_add_(0, ray_indices, torch.ones_like(ray_indices))
chunk_starts = chunk_cnts.cumsum(dim=0, dtype=dtype) - chunk_cnts
packed_info = torch.stack([chunk_starts, chunk_cnts], dim=-1)
else:
raise NotImplementedError("Only support cuda inputs.")
return packed_info
The provided code snippet includes necessary dependencies for implementing the `inclusive_sum` function. Write a Python function `def inclusive_sum( inputs: Tensor, packed_info: Optional[Tensor] = None, indices: Optional[Tensor] = None, ) -> Tensor` to solve the following problem:
Inclusive Sum that supports flattened tensor. This function is equivalent to `torch.cumsum(inputs, dim=-1)`, but allows for a flattened input tensor and a `packed_info` tensor that specifies the chunks in the flattened input. Args: inputs: The tensor to be summed. Can be either a N-D tensor, or a flattened tensor with either `packed_info` or `indices` specified. packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened input tensor, with in total n_rays chunks. If None, the input is assumed to be a N-D tensor and the sum is computed along the last dimension. Default is None. indices: A flattened tensor with the same shape as `inputs`. Returns: The inclusive sum with the same shape as the input tensor. Example: .. code-block:: python >>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda") >>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda") >>> inclusive_sum(inputs, packed_info) tensor([ 1., 3., 3., 7., 12., 6., 13., 21., 30.], device='cuda:0')
Here is the function:
def inclusive_sum(
inputs: Tensor,
packed_info: Optional[Tensor] = None,
indices: Optional[Tensor] = None,
) -> Tensor:
"""Inclusive Sum that supports flattened tensor.
This function is equivalent to `torch.cumsum(inputs, dim=-1)`, but allows
for a flattened input tensor and a `packed_info` tensor that specifies the
chunks in the flattened input.
Args:
inputs: The tensor to be summed. Can be either a N-D tensor, or a flattened
tensor with either `packed_info` or `indices` specified.
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened input tensor, with in total n_rays chunks.
If None, the input is assumed to be a N-D tensor and the sum is computed
along the last dimension. Default is None.
indices: A flattened tensor with the same shape as `inputs`.
Returns:
The inclusive sum with the same shape as the input tensor.
Example:
.. code-block:: python
>>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda")
>>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda")
>>> inclusive_sum(inputs, packed_info)
tensor([ 1., 3., 3., 7., 12., 6., 13., 21., 30.], device='cuda:0')
"""
if indices is not None and packed_info is not None:
raise ValueError(
"Only one of `indices` and `packed_info` can be specified."
)
if indices is not None:
assert (
indices.dim() == 1 and indices.shape == inputs.shape
), "indices must be 1-D with the same shape as inputs."
if _C.is_cub_available():
# Use CUB if available
outputs = _InclusiveSumCUB.apply(indices, inputs)
else:
warnings.warn(
"Passing in `indices` without CUB available is slow. Considering passing in `packed_info` instead."
)
packed_info = pack_info(ray_indices=indices)
if packed_info is not None:
assert inputs.dim() == 1, "inputs must be flattened."
assert (
packed_info.dim() == 2 and packed_info.shape[-1] == 2
), "packed_info must be 2-D with shape (B, 2)."
chunk_starts, chunk_cnts = packed_info.unbind(dim=-1)
outputs = _InclusiveSum.apply(chunk_starts, chunk_cnts, inputs, False)
if indices is None and packed_info is None:
# Batched inclusive sum on the last dimension.
outputs = torch.cumsum(inputs, dim=-1)
return outputs | Inclusive Sum that supports flattened tensor. This function is equivalent to `torch.cumsum(inputs, dim=-1)`, but allows for a flattened input tensor and a `packed_info` tensor that specifies the chunks in the flattened input. Args: inputs: The tensor to be summed. Can be either a N-D tensor, or a flattened tensor with either `packed_info` or `indices` specified. packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened input tensor, with in total n_rays chunks. If None, the input is assumed to be a N-D tensor and the sum is computed along the last dimension. Default is None. indices: A flattened tensor with the same shape as `inputs`. Returns: The inclusive sum with the same shape as the input tensor. Example: .. code-block:: python >>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda") >>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda") >>> inclusive_sum(inputs, packed_info) tensor([ 1., 3., 3., 7., 12., 6., 13., 21., 30.], device='cuda:0') |
189,574 | import warnings
from typing import Optional
import torch
from torch import Tensor
from . import cuda as _C
from .pack import pack_info
class _InclusiveProd(torch.autograd.Function):
"""Inclusive Product on a Flattened Tensor."""
def forward(ctx, chunk_starts, chunk_cnts, inputs):
chunk_starts = chunk_starts.contiguous()
chunk_cnts = chunk_cnts.contiguous()
inputs = inputs.contiguous()
outputs = _C.inclusive_prod_forward(chunk_starts, chunk_cnts, inputs)
if ctx.needs_input_grad[2]:
ctx.save_for_backward(chunk_starts, chunk_cnts, inputs, outputs)
return outputs
def backward(ctx, grad_outputs):
grad_outputs = grad_outputs.contiguous()
chunk_starts, chunk_cnts, inputs, outputs = ctx.saved_tensors
grad_inputs = _C.inclusive_prod_backward(
chunk_starts, chunk_cnts, inputs, outputs, grad_outputs
)
return None, None, grad_inputs
class _InclusiveProdCUB(torch.autograd.Function):
"""Inclusive Product on a Flattened Tensor with CUB."""
def forward(ctx, indices, inputs):
indices = indices.contiguous()
inputs = inputs.contiguous()
outputs = _C.inclusive_prod_cub_forward(indices, inputs)
if ctx.needs_input_grad[1]:
ctx.save_for_backward(indices, inputs, outputs)
return outputs
def backward(ctx, grad_outputs):
grad_outputs = grad_outputs.contiguous()
indices, inputs, outputs = ctx.saved_tensors
grad_inputs = _C.inclusive_prod_cub_backward(
indices, inputs, outputs, grad_outputs
)
return None, grad_inputs
def pack_info(ray_indices: Tensor, n_rays: Optional[int] = None) -> Tensor:
"""Pack `ray_indices` to `packed_info`. Useful for converting per sample data to per ray data.
Note:
this function is not differentiable to any inputs.
Args:
ray_indices: Ray indices of the samples. LongTensor with shape (n_sample).
n_rays: Number of rays. If None, it is inferred from `ray_indices`. Default is None.
Returns:
A LongTensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened input tensor, with in total n_rays chunks.
Example:
.. code-block:: python
>>> ray_indices = torch.tensor([0, 0, 1, 1, 1, 2, 2, 2, 2], device="cuda")
>>> packed_info = pack_info(ray_indices, n_rays=3)
>>> packed_info
tensor([[0, 2], [2, 3], [5, 4]], device='cuda:0')
"""
assert (
ray_indices.dim() == 1
), "ray_indices must be a 1D tensor with shape (n_samples)."
if ray_indices.is_cuda:
device = ray_indices.device
dtype = ray_indices.dtype
if n_rays is None:
n_rays = ray_indices.max().item() + 1
chunk_cnts = torch.zeros((n_rays,), device=device, dtype=dtype)
chunk_cnts.index_add_(0, ray_indices, torch.ones_like(ray_indices))
chunk_starts = chunk_cnts.cumsum(dim=0, dtype=dtype) - chunk_cnts
packed_info = torch.stack([chunk_starts, chunk_cnts], dim=-1)
else:
raise NotImplementedError("Only support cuda inputs.")
return packed_info
The provided code snippet includes necessary dependencies for implementing the `inclusive_prod` function. Write a Python function `def inclusive_prod( inputs: Tensor, packed_info: Optional[Tensor] = None, indices: Optional[Tensor] = None, ) -> Tensor` to solve the following problem:
Inclusive Product that supports flattened tensor. This function is equivalent to `torch.cumprod(inputs, dim=-1)`, but allows for a flattened input tensor and a `packed_info` tensor that specifies the chunks in the flattened input. Args: inputs: The tensor to be producted. Can be either a N-D tensor, or a flattened tensor with either `packed_info` or `indices` specified. packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened input tensor, with in total n_rays chunks. If None, the input is assumed to be a N-D tensor and the product is computed along the last dimension. Default is None. indices: A flattened tensor with the same shape as `inputs`. Returns: The inclusive product with the same shape as the input tensor. Example: .. code-block:: python >>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda") >>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda") >>> inclusive_prod(inputs, packed_info) tensor([1., 2., 3., 12., 60., 6., 42., 336., 3024.], device='cuda:0')
Here is the function:
def inclusive_prod(
inputs: Tensor,
packed_info: Optional[Tensor] = None,
indices: Optional[Tensor] = None,
) -> Tensor:
"""Inclusive Product that supports flattened tensor.
This function is equivalent to `torch.cumprod(inputs, dim=-1)`, but allows
for a flattened input tensor and a `packed_info` tensor that specifies the
chunks in the flattened input.
Args:
inputs: The tensor to take the cumulative product over. Can be either a N-D tensor, or a flattened
tensor with either `packed_info` or `indices` specified.
packed_info: A tensor of shape (n_rays, 2) that specifies the start and count
of each chunk in the flattened input tensor, with in total n_rays chunks.
If None, the input is assumed to be a N-D tensor and the product is computed
along the last dimension. Default is None.
indices: A flattened tensor with the same shape as `inputs`.
Returns:
The inclusive product with the same shape as the input tensor.
Example:
.. code-block:: python
>>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda")
>>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda")
>>> inclusive_prod(inputs, packed_info)
tensor([1., 2., 3., 12., 60., 6., 42., 336., 3024.], device='cuda:0')
"""
if indices is not None and packed_info is not None:
raise ValueError(
"Only one of `indices` and `packed_info` can be specified."
)
if indices is not None:
assert (
indices.dim() == 1 and indices.shape == inputs.shape
), "indices must be 1-D with the same shape as inputs."
if _C.is_cub_available():
# Use CUB if available
outputs = _InclusiveProdCUB.apply(indices, inputs)
else:
warnings.warn(
"Passing in `indices` without CUB available is slow. Considering passing in `packed_info` instead."
)
packed_info = pack_info(ray_indices=indices)
if packed_info is not None:
assert inputs.dim() == 1, "inputs must be flattened."
assert (
packed_info.dim() == 2 and packed_info.shape[-1] == 2
), "packed_info must be 2-D with shape (B, 2)."
chunk_starts, chunk_cnts = packed_info.unbind(dim=-1)
outputs = _InclusiveProd.apply(chunk_starts, chunk_cnts, inputs)
if indices is None and packed_info is None:
# Batched inclusive product on the last dimension.
outputs = torch.cumprod(inputs, dim=-1)
return outputs | Inclusive Product that supports flattened tensor. This function is equivalent to `torch.cumprod(inputs, dim=-1)`, but allows for a flattened input tensor and a `packed_info` tensor that specifies the chunks in the flattened input. Args: inputs: The tensor to be producted. Can be either a N-D tensor, or a flattened tensor with either `packed_info` or `indices` specified. packed_info: A tensor of shape (n_rays, 2) that specifies the start and count of each chunk in the flattened input tensor, with in total n_rays chunks. If None, the input is assumed to be a N-D tensor and the product is computed along the last dimension. Default is None. indices: A flattened tensor with the same shape as `inputs`. Returns: The inclusive product with the same shape as the input tensor. Example: .. code-block:: python >>> inputs = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.], device="cuda") >>> packed_info = torch.tensor([[0, 2], [2, 3], [5, 4]], device="cuda") >>> inclusive_prod(inputs, packed_info) tensor([1., 2., 3., 12., 60., 6., 42., 336., 3024.], device='cuda:0') |
189,575 | import subprocess
import yaml
from rich.console import Console
from rich.style import Style
console = Console(width=120)
LOCAL_TESTS = [
"Run license checks",
"Run isort",
"Run Black",
"Python Pylint",
"Test with pytest",
]
def run_command(command: str) -> bool:
"""Run a command kill actions if it fails
Args:
command: command to run
continue_on_fail: whether to continue running commands if the current one fails.
"""
ret_code = subprocess.call(command, shell=True)
if ret_code != 0:
console.print(f"[bold red]Error: `{command}` failed.")
return ret_code == 0
The provided code snippet includes necessary dependencies for implementing the `run_github_actions_file` function. Write a Python function `def run_github_actions_file(filename: str)` to solve the following problem:
Run a github actions file locally. Args: filename: Which yml github actions file to run.
Here is the function:
def run_github_actions_file(filename: str):
"""Run a github actions file locally.
Args:
filename: Which yml github actions file to run.
"""
with open(filename, "rb") as f:
my_dict = yaml.safe_load(f)
steps = my_dict["jobs"]["build"]["steps"]
success = True
for step in steps:
if "name" in step and step["name"] in LOCAL_TESTS:
compressed = step["run"].replace("\n", ";").replace("\\", "")
compressed = compressed.replace("--check", "")
curr_command = f"{compressed}"
console.line()
console.rule(f"[bold green]Running: {curr_command}")
success = success and run_command(curr_command)
else:
skip_name = step["name"] if "name" in step else step["uses"]
console.print(f"Skipping {skip_name}")
# Code Testing
console.line()
console.rule("[bold green]Running pytest")
success = success and run_command("pytest")
# Add checks for building documentation
console.line()
console.rule("[bold green]Building Documentation")
success = success and run_command(
"cd docs/; make clean; make html SPHINXOPTS='-W;'"
)
if success:
console.line()
console.rule(characters="=")
console.print(
"[bold green]:TADA: :TADA: :TADA: ALL CHECKS PASSED :TADA: :TADA: :TADA:",
justify="center",
)
console.rule(characters="=")
else:
console.line()
console.rule(characters="=", style=Style(color="red"))
console.print(
"[bold red]:skull: :skull: :skull: ERRORS FOUND :skull: :skull: :skull:",
justify="center",
)
console.rule(characters="=", style=Style(color="red")) | Run a github actions file locally. Args: filename: Which yml github actions file to run. |
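An assumed invocation; the workflow path is hypothetical and should point at the repository's actual GitHub Actions file:
if __name__ == "__main__":
    run_github_actions_file(filename=".github/workflows/core_code_checks.yml")  # hypothetical path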
189,612 | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
from torch.utils.data import DataLoader
data_dict = {
'ETTh1': Dataset_ETT_hour,
'ETTh2': Dataset_ETT_hour,
'ETTm1': Dataset_ETT_minute,
'ETTm2': Dataset_ETT_minute,
'custom': Dataset_Custom,
}
class Dataset_Pred(Dataset):
def __init__(self, root_path, flag='pred', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None, train_only=False):
# size [seq_len, label_len, pred_len]
# info
if size is None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['pred']
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
'''
df_raw.columns: ['date', ...(other features), target feature]
'''
if self.cols:
cols = self.cols.copy()
else:
cols = list(df_raw.columns)
self.cols = cols.copy()
cols.remove('date')
if self.features == 'S':
cols.remove(self.target)
border1 = len(df_raw) - self.seq_len
border2 = len(df_raw)
if self.features == 'M' or self.features == 'MS':
df_raw = df_raw[['date'] + cols]
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == 'S':
df_raw = df_raw[['date'] + cols + [self.target]]
df_data = df_raw[[self.target]]
if self.scale:
self.scaler.fit(df_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
tmp_stamp = df_raw[['date']][border1:border2]
tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)
df_stamp = pd.DataFrame(columns=['date'])
df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
self.future_dates = list(pred_dates[1:])
if self.timeenc == 0:
df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)
df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)
data_stamp = df_stamp.drop(['date'], 1).values
elif self.timeenc == 1:
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1, 0)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = self.data_x[r_begin:r_begin + self.label_len]
else:
seq_y = self.data_y[r_begin:r_begin + self.label_len]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return seq_x, seq_y, seq_x_mark, seq_y_mark
def __len__(self):
return len(self.data_x) - self.seq_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
def data_provider(args, flag):
Data = data_dict[args.data]
timeenc = 0 if args.embed != 'timeF' else 1
train_only = args.train_only
if flag == 'test':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
freq = args.freq
elif flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = 1
freq = args.freq
Data = Dataset_Pred
else:
shuffle_flag = True
drop_last = True
batch_size = args.batch_size
freq = args.freq
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target,
timeenc=timeenc,
freq=freq,
train_only=train_only
)
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader | null |
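A minimal usage sketch for data_provider, assuming an argparse-style namespace that carries the fields the function reads; the dataset key, paths, and sizes below are hypothetical, and 'ETTh1' is assumed to be a key of data_dict:
from types import SimpleNamespace

# Hypothetical configuration; each field mirrors an attribute data_provider reads.
args = SimpleNamespace(
    data='ETTh1', root_path='./data/ETT/', data_path='ETTh1.csv',
    seq_len=96, label_len=48, pred_len=24,
    features='M', target='OT', embed='timeF', freq='h',
    batch_size=32, num_workers=0, train_only=False,
)

train_set, train_loader = data_provider(args, flag='train')
for seq_x, seq_y, seq_x_mark, seq_y_mark in train_loader:
    # per batch: seq_x is [batch, seq_len, n_vars], seq_y is [batch, label_len + pred_len, n_vars]
    break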
189,614 | import torch
import torch.nn as nn
import numpy as np
from functools import partial
from scipy.special import eval_legendre
from sympy import Poly, legendre, Symbol, chebyshevt
def legendreDer(k, x): ...  # signature only in this row; body not included
def get_phi_psi(k, base): ...  # signature only in this row; body not included
def get_filter(base, k):
def psi(psi1, psi2, i, inp):
mask = (inp<=0.5) * 1.0
return psi1[i](inp) * mask + psi2[i](inp) * (1-mask)
if base not in ['legendre', 'chebyshev']:
raise Exception('Base not supported')
x = Symbol('x')
H0 = np.zeros((k,k))
H1 = np.zeros((k,k))
G0 = np.zeros((k,k))
G1 = np.zeros((k,k))
PHI0 = np.zeros((k,k))
PHI1 = np.zeros((k,k))
phi, psi1, psi2 = get_phi_psi(k, base)
if base == 'legendre':
roots = Poly(legendre(k, 2*x-1)).all_roots()
x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)
wm = 1/k/legendreDer(k,2*x_m-1)/eval_legendre(k-1,2*x_m-1)
for ki in range(k):
for kpi in range(k):
H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum()
G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum()
H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum()
G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum()
PHI0 = np.eye(k)
PHI1 = np.eye(k)
elif base == 'chebyshev':
x = Symbol('x')
kUse = 2*k
roots = Poly(chebyshevt(kUse, 2*x-1)).all_roots()
x_m = np.array([rt.evalf(20) for rt in roots]).astype(np.float64)
# x_m[x_m==0.5] = 0.5 + 1e-8 # add small noise to avoid the case of 0.5 belonging to both phi(2x) and phi(2x-1)
# not needed for our purpose here, we use even k always to avoid
wm = np.pi / kUse / 2
for ki in range(k):
for kpi in range(k):
H0[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki](x_m/2) * phi[kpi](x_m)).sum()
G0[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, x_m/2) * phi[kpi](x_m)).sum()
H1[ki, kpi] = 1/np.sqrt(2) * (wm * phi[ki]((x_m+1)/2) * phi[kpi](x_m)).sum()
G1[ki, kpi] = 1/np.sqrt(2) * (wm * psi(psi1, psi2, ki, (x_m+1)/2) * phi[kpi](x_m)).sum()
PHI0[ki, kpi] = (wm * phi[ki](2*x_m) * phi[kpi](2*x_m)).sum() * 2
PHI1[ki, kpi] = (wm * phi[ki](2*x_m-1) * phi[kpi](2*x_m-1)).sum() * 2
PHI0[np.abs(PHI0)<1e-8] = 0
PHI1[np.abs(PHI1)<1e-8] = 0
H0[np.abs(H0)<1e-8] = 0
H1[np.abs(H1)<1e-8] = 0
G0[np.abs(G0)<1e-8] = 0
G1[np.abs(G1)<1e-8] = 0
return H0, H1, G0, G1, PHI0, PHI1 | null |
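A short usage sketch for get_filter; it assumes get_phi_psi (and, for the Legendre base, legendreDer) are fully implemented elsewhere, since only their signatures appear in this row:
k = 4  # basis functions per scale; even k is assumed, as noted in the comment above
H0, H1, G0, G1, PHI0, PHI1 = get_filter('legendre', k)
# All six outputs are k x k numpy arrays: H0/H1 are the lowpass (scaling) filters,
# G0/G1 the highpass (wavelet) filters, and PHI0/PHI1 the correction matrices
# (identity for the Legendre base, computed explicitly for Chebyshev).
print(H0.shape, G0.shape)  # (4, 4) (4, 4)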
189,622 | import numpy as np
# Signatures only in this row; full definitions appear in row 189,630 below.
def RSE(pred, true): ...
def CORR(pred, true): ...
def MAE(pred, true): ...
def MSE(pred, true): ...
def RMSE(pred, true): ...
def MAPE(pred, true): ...
def MSPE(pred, true): ...
def metric2(pred, true):
mae = MAE(pred, true)
mse = MSE(pred, true)
rmse = RMSE(pred, true)
mape = MAPE(pred, true)
mspe = MSPE(pred, true)
rse = RSE(pred, true)
corr = CORR(pred, true)
return mae, mse, rmse, mape, mspe, rse, corr | null |
189,627 | import numpy as np
import torch
import matplotlib.pyplot as plt
import time
def adjust_learning_rate(optimizer, epoch, args):
# lr = args.learning_rate * (0.2 ** (epoch // 2))
    lr_adjust = {}  # default for unrecognized lradj values, avoids a NameError below
    if args.lradj == 'type1':
lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}
elif args.lradj == 'type2':
lr_adjust = {
2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,
10: 5e-7, 15: 1e-7, 20: 5e-8
}
elif args.lradj == '3':
lr_adjust = {epoch: args.learning_rate if epoch < 10 else args.learning_rate*0.1}
elif args.lradj == '4':
lr_adjust = {epoch: args.learning_rate if epoch < 15 else args.learning_rate*0.1}
elif args.lradj == '5':
lr_adjust = {epoch: args.learning_rate if epoch < 25 else args.learning_rate*0.1}
elif args.lradj == '6':
lr_adjust = {epoch: args.learning_rate if epoch < 5 else args.learning_rate*0.1}
    if epoch in lr_adjust:
lr = lr_adjust[epoch]
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('Updating learning rate to {}'.format(lr)) | null |
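A minimal sketch of how adjust_learning_rate is typically called at the end of each epoch, assuming an argparse-style args with lradj and learning_rate; the model and schedule here are hypothetical:
from types import SimpleNamespace

model = torch.nn.Linear(8, 1)                      # placeholder model
args = SimpleNamespace(lradj='type1', learning_rate=1e-3)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

for epoch in range(1, 6):
    # ... one epoch of training would run here ...
    adjust_learning_rate(optimizer, epoch, args)
# 'type1' sets lr to learning_rate * 0.5 ** (epoch - 1): 1e-3, 5e-4, 2.5e-4, ...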
189,630 | import numpy as np
def RSE(pred, true):
return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2))
def CORR(pred, true):
u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)
d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))
d += 1e-12
return 0.01*(u / d).mean(-1)
def MAE(pred, true):
return np.mean(np.abs(pred - true))
def MSE(pred, true):
return np.mean((pred - true) ** 2)
def RMSE(pred, true):
return np.sqrt(MSE(pred, true))
def MAPE(pred, true):
return np.mean(np.abs((pred - true) / true))
def MSPE(pred, true):
return np.mean(np.square((pred - true) / true))
def metric(pred, true):
mae = MAE(pred, true)
mse = MSE(pred, true)
rmse = RMSE(pred, true)
mape = MAPE(pred, true)
mspe = MSPE(pred, true)
rse = RSE(pred, true)
corr = CORR(pred, true)
return mae, mse, rmse, mape, mspe, rse, corr | null |
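A quick sanity check of metric on random arrays; the shapes mimic a forecasting output of [n_windows, pred_len, n_vars] and are purely illustrative:
pred = np.random.rand(8, 24, 7)
true = np.random.rand(8, 24, 7) + 0.1   # keep targets away from zero for MAPE/MSPE
mae, mse, rmse, mape, mspe, rse, corr = metric(pred, true)
print(f'mse: {mse:.4f}  mae: {mae:.4f}  rse: {rse:.4f}')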