Dataset columns: python_code (string, 0 to 1.02M characters), repo_name (string, 9 to 48 characters), file_path (string, 5 to 114 characters).
import os import fire import random from retry.api import retry_call from tqdm import tqdm from datetime import datetime from functools import wraps from lightweight_gan import Trainer, NanException from lightweight_gan.diff_augment_test import DiffAugmentTest import torch import torch.multiprocessing as mp import torch.distributed as dist import numpy as np def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_list(el): return el if isinstance(el, list) else [el] def timestamped_filename(prefix = 'generated-'): now = datetime.now() timestamp = now.strftime("%m-%d-%Y_%H-%M-%S") return f'{prefix}{timestamp}' def set_seed(seed): torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) def run_training(rank, world_size, model_args, data, load_from, new, num_train_steps, name, seed, use_aim, aim_repo, aim_run_hash): is_main = rank == 0 is_ddp = world_size > 1 if is_ddp: set_seed(seed) os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '12355' dist.init_process_group('nccl', rank=rank, world_size=world_size) print(f"{rank + 1}/{world_size} process initialized.") model_args.update( is_ddp = is_ddp, rank = rank, world_size = world_size ) model = Trainer(**model_args, hparams=model_args, use_aim=use_aim, aim_repo=aim_repo, aim_run_hash=aim_run_hash) if not new: model.load(load_from) else: model.clear() model.set_data_src(data) progress_bar = tqdm(initial = model.steps, total = num_train_steps, mininterval=10., desc=f'{name}<{data}>') while model.steps < num_train_steps: retry_call(model.train, tries=3, exceptions=NanException) progress_bar.n = model.steps progress_bar.refresh() if is_main and model.steps % 50 == 0: model.print_log() model.save(model.checkpoint_num) if is_ddp: dist.destroy_process_group() def train_from_folder( data = './data', results_dir = './results', models_dir = './models', name = 'default', new = False, load_from = -1, image_size = 256, optimizer = 'adam', fmap_max = 512, transparent = False, greyscale = False, batch_size = 10, gradient_accumulate_every = 4, num_train_steps = 150000, learning_rate = 2e-4, save_every = 1000, evaluate_every = 1000, generate = False, generate_types = ['default', 'ema'], generate_interpolation = False, aug_test = False, aug_prob=None, aug_types=['cutout', 'translation'], dataset_aug_prob=0., attn_res_layers = [32], freq_chan_attn = False, disc_output_size = 1, dual_contrast_loss = False, antialias = False, interpolation_num_steps = 100, save_frames = False, num_image_tiles = None, num_workers = None, multi_gpus = False, calculate_fid_every = None, calculate_fid_num_images = 12800, clear_fid_cache = False, seed = 42, amp = False, show_progress = False, use_aim = False, aim_repo = None, aim_run_hash = None, load_strict = True ): num_image_tiles = default(num_image_tiles, 4 if image_size > 512 else 8) model_args = dict( name = name, results_dir = results_dir, models_dir = models_dir, batch_size = batch_size, gradient_accumulate_every = gradient_accumulate_every, attn_res_layers = cast_list(attn_res_layers), freq_chan_attn = freq_chan_attn, disc_output_size = disc_output_size, dual_contrast_loss = dual_contrast_loss, antialias = antialias, image_size = image_size, num_image_tiles = num_image_tiles, optimizer = optimizer, num_workers = num_workers, fmap_max = fmap_max, transparent = transparent, greyscale = greyscale, lr = learning_rate, save_every = save_every, evaluate_every = evaluate_every, 
aug_prob = aug_prob, aug_types = cast_list(aug_types), dataset_aug_prob = dataset_aug_prob, calculate_fid_every = calculate_fid_every, calculate_fid_num_images = calculate_fid_num_images, clear_fid_cache = clear_fid_cache, amp = amp, load_strict = load_strict ) if generate: model = Trainer(**model_args, use_aim = use_aim) model.load(load_from) samples_name = timestamped_filename() checkpoint = model.checkpoint_num dir_result = model.generate(samples_name, num_image_tiles, checkpoint, generate_types) print(f'sample images generated at {dir_result}') return if generate_interpolation: model = Trainer(**model_args, use_aim = use_aim) model.load(load_from) samples_name = timestamped_filename() model.generate_interpolation(samples_name, num_image_tiles, num_steps = interpolation_num_steps, save_frames = save_frames) print(f'interpolation generated at {results_dir}/{name}/{samples_name}') return if show_progress: model = Trainer(**model_args, use_aim = use_aim) model.show_progress(num_images=num_image_tiles, types=generate_types) return if aug_test: DiffAugmentTest(data=data, image_size=image_size, batch_size=batch_size, types=aug_types, nrow=num_image_tiles) return world_size = torch.cuda.device_count() if world_size == 1 or not multi_gpus: run_training(0, 1, model_args, data, load_from, new, num_train_steps, name, seed, use_aim, aim_repo, aim_run_hash) return mp.spawn(run_training, args=(world_size, model_args, data, load_from, new, num_train_steps, name, seed, use_aim, aim_repo, aim_run_hash,), nprocs=world_size, join=True) def main(): fire.Fire(train_from_folder)
lightweight-gan-main
lightweight_gan/cli.py
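Since main() hands train_from_folder to fire.Fire, every keyword argument above doubles as a command-line flag. A minimal sketch of the equivalent programmatic call, assuming a CUDA GPU and a folder of training images at ./data; the run name and paths are placeholders, and the defaults in the signature cover everything omitted:

from lightweight_gan.cli import train_from_folder

# kicks off training; samples land under ./results/my-run and checkpoints under ./models/my-run
train_from_folder(
    data = './data',                  # placeholder folder of training images
    name = 'my-run',                  # placeholder run name
    image_size = 256,                 # must be a power of 2
    batch_size = 10,
    gradient_accumulate_every = 4,
    num_train_steps = 150000,
)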
import os
import tempfile
from pathlib import Path
from shutil import copyfile

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

from lightweight_gan.lightweight_gan import AugWrapper, ImageDataset

assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'

class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x

@torch.no_grad()
def DiffAugmentTest(image_size = 256, data = './data/0.jpg', types = [], batch_size = 10, rank = 0, nrow = 5):
    model = DummyModel()
    aug_wrapper = AugWrapper(model, image_size)

    with tempfile.TemporaryDirectory() as directory:
        file = Path(data)

        if os.path.exists(file):
            file_name, ext = os.path.splitext(data)

            for i in range(batch_size):
                tmp_file_name = str(i) + ext
                copyfile(file, os.path.join(directory, tmp_file_name))

            dataset = ImageDataset(directory, image_size, aug_prob=0)
            dataloader = DataLoader(dataset, batch_size=batch_size)
            image_batch = next(iter(dataloader)).cuda(rank)
            images_augment = aug_wrapper(images=image_batch, prob=1, types=types, detach=True)

            save_result = file_name + f'_augs{ext}'
            torchvision.utils.save_image(images_augment, save_result, nrow=nrow)

            print('Save result to:', save_result)
        else:
            print('File not found. File', file)
lightweight-gan-main
lightweight_gan/diff_augment_test.py
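A small usage sketch for the helper above, assuming a CUDA device and a sample image at the placeholder path ./data/0.jpg; it copies the image batch_size times, applies the requested DiffAugment policies, and writes a preview grid next to the input:

from lightweight_gan.diff_augment_test import DiffAugmentTest

DiffAugmentTest(
    data = './data/0.jpg',               # placeholder sample image
    image_size = 256,
    batch_size = 10,
    types = ['cutout', 'translation'],   # same defaults the CLI uses for aug_types
    nrow = 5,                            # grid width of the saved preview (./data/0_augs.jpg)
)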
import os import json import multiprocessing from random import random import math from math import log2, floor from functools import lru_cache, partial from contextlib import contextmanager, ExitStack from pathlib import Path from shutil import rmtree import torch from torch.cuda.amp import autocast, GradScaler from torch.optim import Adam from torch import nn, einsum import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from torch.autograd import grad as torch_grad from torch.utils.data.distributed import DistributedSampler from torch.nn.parallel import DistributedDataParallel as DDP from PIL import Image import torchvision from torchvision import transforms from kornia.filters import filter2d from lightweight_gan.diff_augment import DiffAugment from lightweight_gan.version import __version__ from tqdm import tqdm from einops import rearrange, reduce, repeat from einops.layers.torch import Rearrange from adabelief_pytorch import AdaBelief # asserts assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.' # constants NUM_CORES = multiprocessing.cpu_count() EXTS = ['jpg', 'jpeg', 'png', 'tiff'] # helpers def exists(val): return val is not None @contextmanager def null_context(): yield def combine_contexts(contexts): @contextmanager def multi_contexts(): with ExitStack() as stack: yield [stack.enter_context(ctx()) for ctx in contexts] return multi_contexts def is_power_of_two(val): return log2(val).is_integer() def default(val, d): return val if exists(val) else d def set_requires_grad(model, bool): for p in model.parameters(): p.requires_grad = bool def cycle(iterable): while True: for i in iterable: yield i def raise_if_nan(t): if torch.isnan(t): raise NanException def gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps): if is_ddp: num_no_syncs = gradient_accumulate_every - 1 head = [combine_contexts(map(lambda ddp: ddp.no_sync, ddps))] * num_no_syncs tail = [null_context] contexts = head + tail else: contexts = [null_context] * gradient_accumulate_every for context in contexts: with context(): yield def evaluate_in_chunks(max_batch_size, model, *args): split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args)))) chunked_outputs = [model(*i) for i in split_args] if len(chunked_outputs) == 1: return chunked_outputs[0] return torch.cat(chunked_outputs, dim=0) def slerp(val, low, high): low_norm = low / torch.norm(low, dim=1, keepdim=True) high_norm = high / torch.norm(high, dim=1, keepdim=True) omega = torch.acos((low_norm * high_norm).sum(1)) so = torch.sin(omega) res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high return res def safe_div(n, d): try: res = n / d except ZeroDivisionError: prefix = '' if int(n >= 0) else '-' res = float(f'{prefix}inf') return res # loss functions def gen_hinge_loss(fake, real): return fake.mean() def hinge_loss(real, fake): return (F.relu(1 + real) + F.relu(1 - fake)).mean() def dual_contrastive_loss(real_logits, fake_logits): device = real_logits.device real_logits, fake_logits = map(lambda t: rearrange(t, '... 
-> (...)'), (real_logits, fake_logits)) def loss_half(t1, t2): t1 = rearrange(t1, 'i -> i ()') t2 = repeat(t2, 'j -> i j', i = t1.shape[0]) t = torch.cat((t1, t2), dim = -1) return F.cross_entropy(t, torch.zeros(t1.shape[0], device = device, dtype = torch.long)) return loss_half(real_logits, fake_logits) + loss_half(-fake_logits, -real_logits) @lru_cache(maxsize=10) def det_randn(*args): """ deterministic random to track the same latent vars (and images) across training steps helps to visualize same image over training steps """ return torch.randn(*args) def interpolate_between(a, b, *, num_samples, dim): assert num_samples > 2 samples = [] step_size = 0 for _ in range(num_samples): sample = torch.lerp(a, b, step_size) samples.append(sample) step_size += 1 / (num_samples - 1) return torch.stack(samples, dim=dim) # helper classes class NanException(Exception): pass class EMA(): def __init__(self, beta): super().__init__() self.beta = beta def update_average(self, old, new): if not exists(old): return new return old * self.beta + (1 - self.beta) * new class RandomApply(nn.Module): def __init__(self, prob, fn, fn_else = lambda x: x): super().__init__() self.fn = fn self.fn_else = fn_else self.prob = prob def forward(self, x): fn = self.fn if random() < self.prob else self.fn_else return fn(x) class ChanNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.fn = fn self.norm = ChanNorm(dim) def forward(self, x): return self.fn(self.norm(x)) class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class SumBranches(nn.Module): def __init__(self, branches): super().__init__() self.branches = nn.ModuleList(branches) def forward(self, x): return sum(map(lambda fn: fn(x), self.branches)) class Blur(nn.Module): def __init__(self): super().__init__() f = torch.Tensor([1, 2, 1]) self.register_buffer('f', f) def forward(self, x): f = self.f f = f[None, None, :] * f [None, :, None] return filter2d(x, f, normalized=True) class Noise(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, x, noise = None): b, _, h, w, device = *x.shape, x.device if not exists(noise): noise = torch.randn(b, 1, h, w, device = device) return x + self.weight * noise def Conv2dSame(dim_in, dim_out, kernel_size, bias = True): pad_left = kernel_size // 2 pad_right = (pad_left - 1) if (kernel_size % 2) == 0 else pad_left return nn.Sequential( nn.ZeroPad2d((pad_left, pad_right, pad_left, pad_right)), nn.Conv2d(dim_in, dim_out, kernel_size, bias = bias) ) # attention class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias), nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias) ) def forward(self, x): return self.net(x) class LinearAttention(nn.Module): def __init__(self, dim, dim_head = 64, heads = 8, kernel_size = 3): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads 
self.dim_head = dim_head inner_dim = dim_head * heads self.kernel_size = kernel_size self.nonlin = nn.GELU() self.to_lin_q = nn.Conv2d(dim, inner_dim, 1, bias = False) self.to_lin_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding = 1, bias = False) self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False) self.to_kv = nn.Conv2d(dim, inner_dim * 2, 1, bias = False) self.to_out = nn.Conv2d(inner_dim * 2, dim, 1) def forward(self, fmap): h, x, y = self.heads, *fmap.shape[-2:] # linear attention lin_q, lin_k, lin_v = (self.to_lin_q(fmap), *self.to_lin_kv(fmap).chunk(2, dim = 1)) lin_q, lin_k, lin_v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (lin_q, lin_k, lin_v)) lin_q = lin_q.softmax(dim = -1) lin_k = lin_k.softmax(dim = -2) lin_q = lin_q * self.scale context = einsum('b n d, b n e -> b d e', lin_k, lin_v) lin_out = einsum('b n d, b d e -> b n e', lin_q, context) lin_out = rearrange(lin_out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y) # conv-like full attention q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim = 1)) q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) c x y', h = h), (q, k, v)) k = F.unfold(k, kernel_size = self.kernel_size, padding = self.kernel_size // 2) v = F.unfold(v, kernel_size = self.kernel_size, padding = self.kernel_size // 2) k, v = map(lambda t: rearrange(t, 'b (d j) n -> b n j d', d = self.dim_head), (k, v)) q = rearrange(q, 'b c ... -> b (...) c') * self.scale sim = einsum('b i d, b i j d -> b i j', q, k) sim = sim - sim.amax(dim = -1, keepdim = True).detach() attn = sim.softmax(dim = -1) full_out = einsum('b i j, b i j d -> b i d', attn, v) full_out = rearrange(full_out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y) # add outputs of linear attention + conv like full attention lin_out = self.nonlin(lin_out) out = torch.cat((lin_out, full_out), dim = 1) return self.to_out(out) # dataset def convert_image_to(img_type, image): if image.mode != img_type: return image.convert(img_type) return image class identity(object): def __call__(self, tensor): return tensor class expand_greyscale(object): def __init__(self, transparent): self.transparent = transparent def __call__(self, tensor): channels = tensor.shape[0] num_target_channels = 4 if self.transparent else 3 if channels == num_target_channels: return tensor alpha = None if channels == 1: color = tensor.expand(3, -1, -1) elif channels == 2: color = tensor[:1].expand(3, -1, -1) alpha = tensor[1:] else: raise Exception(f'image with invalid number of channels given {channels}') if not exists(alpha) and self.transparent: alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device) return color if not self.transparent else torch.cat((color, alpha)) def resize_to_minimum_size(min_size, image): if max(*image.size) < min_size: return torchvision.transforms.functional.resize(image, min_size) return image class ImageDataset(Dataset): def __init__( self, folder, image_size, transparent = False, greyscale = False, aug_prob = 0. 
): super().__init__() self.folder = folder self.image_size = image_size self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')] assert len(self.paths) > 0, f'No images were found in {folder} for training' if transparent: num_channels = 4 pillow_mode = 'RGBA' expand_fn = expand_greyscale(transparent) elif greyscale: num_channels = 1 pillow_mode = 'L' expand_fn = identity() else: num_channels = 3 pillow_mode = 'RGB' expand_fn = expand_greyscale(transparent) convert_image_fn = partial(convert_image_to, pillow_mode) self.transform = transforms.Compose([ transforms.Lambda(convert_image_fn), transforms.Lambda(partial(resize_to_minimum_size, image_size)), transforms.Resize(image_size), RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)), transforms.ToTensor(), transforms.Lambda(expand_fn) ]) def __len__(self): return len(self.paths) def __getitem__(self, index): path = self.paths[index] img = Image.open(path) return self.transform(img) # augmentations def random_hflip(tensor, prob): if prob > random(): return tensor return torch.flip(tensor, dims=(3,)) class AugWrapper(nn.Module): def __init__(self, D, image_size): super().__init__() self.D = D def forward(self, images, prob = 0., types = [], detach = False, **kwargs): context = torch.no_grad if detach else null_context with context(): if random() < prob: images = random_hflip(images, prob=0.5) images = DiffAugment(images, types=types) return self.D(images, **kwargs) # modifiable global variables norm_class = nn.BatchNorm2d class PixelShuffleUpsample(nn.Module): def __init__(self, dim, dim_out = None): super().__init__() dim_out = default(dim_out, dim) conv = nn.Conv2d(dim, dim_out * 4, 1) self.net = nn.Sequential( conv, nn.SiLU(), nn.PixelShuffle(2) ) self.init_conv_(conv) def init_conv_(self, conv): o, i, h, w = conv.weight.shape conv_weight = torch.empty(o // 4, i, h, w) nn.init.kaiming_uniform_(conv_weight) conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...') conv.weight.data.copy_(conv_weight) nn.init.zeros_(conv.bias.data) def forward(self, x): return self.net(x) def SPConvDownsample(dim, dim_out = None): # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample # named SP-conv in the paper, but basically a pixel unshuffle dim_out = default(dim_out, dim) return nn.Sequential( Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2), nn.Conv2d(dim * 4, dim_out, 1) ) # squeeze excitation classes # global context network # https://arxiv.org/abs/2012.13375 # similar to squeeze-excite, but with a simplified attention pooling and a subsequent layer norm class GlobalContext(nn.Module): def __init__( self, *, chan_in, chan_out ): super().__init__() self.to_k = nn.Conv2d(chan_in, 1, 1) chan_intermediate = max(3, chan_out // 2) self.net = nn.Sequential( nn.Conv2d(chan_in, chan_intermediate, 1), nn.LeakyReLU(0.1), nn.Conv2d(chan_intermediate, chan_out, 1), nn.Sigmoid() ) def forward(self, x): context = self.to_k(x) context = context.flatten(2).softmax(dim = -1) out = einsum('b i n, b c n -> b c i', context, x.flatten(2)) out = out.unsqueeze(-1) return self.net(out) # frequency channel attention # https://arxiv.org/abs/2012.11879 def get_1d_dct(i, freq, L): result = math.cos(math.pi * freq * (i + 0.5) / L) / math.sqrt(L) return result * (1 if freq == 0 else math.sqrt(2)) def get_dct_weights(width, channel, fidx_u, fidx_v): dct_weights = torch.zeros(1, channel, width, width) c_part = channel // len(fidx_u) for i, (u_x, v_y) in enumerate(zip(fidx_u, fidx_v)): for x in range(width): for y in range(width): coor_value = get_1d_dct(x, u_x, width) * get_1d_dct(y, v_y, width) dct_weights[:, i * c_part: (i + 1) * c_part, x, y] = coor_value return dct_weights class FCANet(nn.Module): def __init__( self, *, chan_in, chan_out, reduction = 4, width ): super().__init__() freq_w, freq_h = ([0] * 8), list(range(8)) # in paper, it seems 16 frequencies was ideal dct_weights = get_dct_weights(width, chan_in, [*freq_w, *freq_h], [*freq_h, *freq_w]) self.register_buffer('dct_weights', dct_weights) chan_intermediate = max(3, chan_out // reduction) self.net = nn.Sequential( nn.Conv2d(chan_in, chan_intermediate, 1), nn.LeakyReLU(0.1), nn.Conv2d(chan_intermediate, chan_out, 1), nn.Sigmoid() ) def forward(self, x): x = reduce(x * self.dct_weights, 'b c (h h1) (w w1) -> b c h1 w1', 'sum', h1 = 1, w1 = 1) return self.net(x) # generative adversarial network class Generator(nn.Module): def __init__( self, *, image_size, latent_dim = 256, fmap_max = 512, fmap_inverse_coef = 12, transparent = False, greyscale = False, attn_res_layers = [], freq_chan_attn = False ): super().__init__() resolution = log2(image_size) assert is_power_of_two(image_size), 'image size must be a power of 2' if transparent: init_channel = 4 elif greyscale: init_channel = 1 else: init_channel = 3 fmap_max = default(fmap_max, latent_dim) self.initial_conv = nn.Sequential( nn.ConvTranspose2d(latent_dim, latent_dim * 2, 4), norm_class(latent_dim * 2), nn.GLU(dim = 1) ) num_layers = int(resolution) - 2 features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), range(2, num_layers + 2))) features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features)) features = list(map(lambda n: 3 if n[0] >= 8 else n[1], features)) features = [latent_dim, *features] in_out_features = list(zip(features[:-1], features[1:])) self.res_layers = range(2, num_layers + 2) self.layers = nn.ModuleList([]) self.res_to_feature_map = dict(zip(self.res_layers, in_out_features)) 
self.sle_map = ((3, 7), (4, 8), (5, 9), (6, 10)) self.sle_map = list(filter(lambda t: t[0] <= resolution and t[1] <= resolution, self.sle_map)) self.sle_map = dict(self.sle_map) self.num_layers_spatial_res = 1 for (res, (chan_in, chan_out)) in zip(self.res_layers, in_out_features): image_width = 2 ** res attn = None if image_width in attn_res_layers: attn = PreNorm(chan_in, LinearAttention(chan_in)) sle = None if res in self.sle_map: residual_layer = self.sle_map[res] sle_chan_out = self.res_to_feature_map[residual_layer - 1][-1] if freq_chan_attn: sle = FCANet( chan_in = chan_out, chan_out = sle_chan_out, width = 2 ** (res + 1) ) else: sle = GlobalContext( chan_in = chan_out, chan_out = sle_chan_out ) layer = nn.ModuleList([ nn.Sequential( PixelShuffleUpsample(chan_in), Blur(), Conv2dSame(chan_in, chan_out * 2, 4), Noise(), norm_class(chan_out * 2), nn.GLU(dim = 1) ), sle, attn ]) self.layers.append(layer) self.out_conv = nn.Conv2d(features[-1], init_channel, 3, padding = 1) def forward(self, x): x = rearrange(x, 'b c -> b c () ()') x = self.initial_conv(x) x = F.normalize(x, dim = 1) residuals = dict() for (res, (up, sle, attn)) in zip(self.res_layers, self.layers): if exists(attn): x = attn(x) + x x = up(x) if exists(sle): out_res = self.sle_map[res] residual = sle(x) residuals[out_res] = residual next_res = res + 1 if next_res in residuals: x = x * residuals[next_res] return self.out_conv(x) class SimpleDecoder(nn.Module): def __init__( self, *, chan_in, chan_out = 3, num_upsamples = 4, ): super().__init__() self.layers = nn.ModuleList([]) final_chan = chan_out chans = chan_in for ind in range(num_upsamples): last_layer = ind == (num_upsamples - 1) chan_out = chans if not last_layer else final_chan * 2 layer = nn.Sequential( PixelShuffleUpsample(chans), nn.Conv2d(chans, chan_out, 3, padding = 1), nn.GLU(dim = 1) ) self.layers.append(layer) chans //= 2 def forward(self, x): for layer in self.layers: x = layer(x) return x class Discriminator(nn.Module): def __init__( self, *, image_size, fmap_max = 512, fmap_inverse_coef = 12, transparent = False, greyscale = False, disc_output_size = 5, attn_res_layers = [] ): super().__init__() resolution = log2(image_size) assert is_power_of_two(image_size), 'image size must be a power of 2' assert disc_output_size in {1, 5}, 'discriminator output dimensions can only be 5x5 or 1x1' resolution = int(resolution) if transparent: init_channel = 4 elif greyscale: init_channel = 1 else: init_channel = 3 num_non_residual_layers = max(0, int(resolution) - 8) num_residual_layers = 8 - 3 non_residual_resolutions = range(min(8, resolution), 2, -1) features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), non_residual_resolutions)) features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features)) if num_non_residual_layers == 0: res, _ = features[0] features[0] = (res, init_channel) chan_in_out = list(zip(features[:-1], features[1:])) self.non_residual_layers = nn.ModuleList([]) for ind in range(num_non_residual_layers): first_layer = ind == 0 last_layer = ind == (num_non_residual_layers - 1) chan_out = features[0][-1] if last_layer else init_channel self.non_residual_layers.append(nn.Sequential( Blur(), nn.Conv2d(init_channel, chan_out, 4, stride = 2, padding = 1), nn.LeakyReLU(0.1) )) self.residual_layers = nn.ModuleList([]) for (res, ((_, chan_in), (_, chan_out))) in zip(non_residual_resolutions, chan_in_out): image_width = 2 ** res attn = None if image_width in attn_res_layers: attn = PreNorm(chan_in, LinearAttention(chan_in)) 
self.residual_layers.append(nn.ModuleList([ SumBranches([ nn.Sequential( Blur(), SPConvDownsample(chan_in, chan_out), nn.LeakyReLU(0.1), nn.Conv2d(chan_out, chan_out, 3, padding = 1), nn.LeakyReLU(0.1) ), nn.Sequential( Blur(), nn.AvgPool2d(2), nn.Conv2d(chan_in, chan_out, 1), nn.LeakyReLU(0.1), ) ]), attn ])) last_chan = features[-1][-1] if disc_output_size == 5: self.to_logits = nn.Sequential( nn.Conv2d(last_chan, last_chan, 1), nn.LeakyReLU(0.1), nn.Conv2d(last_chan, 1, 4) ) elif disc_output_size == 1: self.to_logits = nn.Sequential( Blur(), nn.Conv2d(last_chan, last_chan, 3, stride = 2, padding = 1), nn.LeakyReLU(0.1), nn.Conv2d(last_chan, 1, 4) ) self.to_shape_disc_out = nn.Sequential( nn.Conv2d(init_channel, 64, 3, padding = 1), Residual(PreNorm(64, LinearAttention(64))), SumBranches([ nn.Sequential( Blur(), SPConvDownsample(64, 32), nn.LeakyReLU(0.1), nn.Conv2d(32, 32, 3, padding = 1), nn.LeakyReLU(0.1) ), nn.Sequential( Blur(), nn.AvgPool2d(2), nn.Conv2d(64, 32, 1), nn.LeakyReLU(0.1), ) ]), Residual(PreNorm(32, LinearAttention(32))), nn.AdaptiveAvgPool2d((4, 4)), nn.Conv2d(32, 1, 4) ) self.decoder1 = SimpleDecoder(chan_in = last_chan, chan_out = init_channel) self.decoder2 = SimpleDecoder(chan_in = features[-2][-1], chan_out = init_channel) if resolution >= 9 else None def forward(self, x, calc_aux_loss = False): orig_img = x for layer in self.non_residual_layers: x = layer(x) layer_outputs = [] for (net, attn) in self.residual_layers: if exists(attn): x = attn(x) + x x = net(x) layer_outputs.append(x) out = self.to_logits(x).flatten(1) img_32x32 = F.interpolate(orig_img, size = (32, 32)) out_32x32 = self.to_shape_disc_out(img_32x32) if not calc_aux_loss: return out, out_32x32, None # self-supervised auto-encoding loss layer_8x8 = layer_outputs[-1] layer_16x16 = layer_outputs[-2] recon_img_8x8 = self.decoder1(layer_8x8) aux_loss = F.mse_loss( recon_img_8x8, F.interpolate(orig_img, size = recon_img_8x8.shape[2:]) ) if exists(self.decoder2): select_random_quadrant = lambda rand_quadrant, img: rearrange(img, 'b c (m h) (n w) -> (m n) b c h w', m = 2, n = 2)[rand_quadrant] crop_image_fn = partial(select_random_quadrant, floor(random() * 4)) img_part, layer_16x16_part = map(crop_image_fn, (orig_img, layer_16x16)) recon_img_16x16 = self.decoder2(layer_16x16_part) aux_loss_16x16 = F.mse_loss( recon_img_16x16, F.interpolate(img_part, size = recon_img_16x16.shape[2:]) ) aux_loss = aux_loss + aux_loss_16x16 return out, out_32x32, aux_loss class LightweightGAN(nn.Module): def __init__( self, *, latent_dim, image_size, optimizer = "adam", fmap_max = 512, fmap_inverse_coef = 12, transparent = False, greyscale = False, disc_output_size = 5, attn_res_layers = [], freq_chan_attn = False, ttur_mult = 1., lr = 2e-4, rank = 0, ddp = False ): super().__init__() self.latent_dim = latent_dim self.image_size = image_size G_kwargs = dict( image_size = image_size, latent_dim = latent_dim, fmap_max = fmap_max, fmap_inverse_coef = fmap_inverse_coef, transparent = transparent, greyscale = greyscale, attn_res_layers = attn_res_layers, freq_chan_attn = freq_chan_attn ) self.G = Generator(**G_kwargs) self.D = Discriminator( image_size = image_size, fmap_max = fmap_max, fmap_inverse_coef = fmap_inverse_coef, transparent = transparent, greyscale = greyscale, attn_res_layers = attn_res_layers, disc_output_size = disc_output_size ) self.ema_updater = EMA(0.995) self.GE = Generator(**G_kwargs) set_requires_grad(self.GE, False) if optimizer == "adam": self.G_opt = Adam(self.G.parameters(), lr = lr, betas=(0.5, 0.9)) 
self.D_opt = Adam(self.D.parameters(), lr = lr * ttur_mult, betas=(0.5, 0.9)) elif optimizer == "adabelief": self.G_opt = AdaBelief(self.G.parameters(), lr = lr, betas=(0.5, 0.9)) self.D_opt = AdaBelief(self.D.parameters(), lr = lr * ttur_mult, betas=(0.5, 0.9)) else: assert False, "No valid optimizer is given" self.apply(self._init_weights) self.reset_parameter_averaging() self.cuda(rank) self.D_aug = AugWrapper(self.D, image_size) def _init_weights(self, m): if type(m) in {nn.Conv2d, nn.Linear}: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu') def EMA(self): def update_moving_average(ma_model, current_model): for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): old_weight, up_weight = ma_params.data, current_params.data ma_params.data = self.ema_updater.update_average(old_weight, up_weight) for current_buffer, ma_buffer in zip(current_model.buffers(), ma_model.buffers()): new_buffer_value = self.ema_updater.update_average(ma_buffer, current_buffer) ma_buffer.copy_(new_buffer_value) update_moving_average(self.GE, self.G) def reset_parameter_averaging(self): self.GE.load_state_dict(self.G.state_dict()) def forward(self, x): raise NotImplemented # trainer class Trainer(): def __init__( self, name = 'default', results_dir = 'results', models_dir = 'models', base_dir = './', optimizer = 'adam', num_workers = None, latent_dim = 256, image_size = 128, num_image_tiles = 8, fmap_max = 512, transparent = False, greyscale = False, batch_size = 4, gp_weight = 10, gradient_accumulate_every = 1, attn_res_layers = [], freq_chan_attn = False, disc_output_size = 5, dual_contrast_loss = False, antialias = False, lr = 2e-4, lr_mlp = 1., ttur_mult = 1., save_every = 1000, evaluate_every = 1000, aug_prob = None, aug_types = ['translation', 'cutout'], dataset_aug_prob = 0., calculate_fid_every = None, calculate_fid_num_images = 12800, clear_fid_cache = False, is_ddp = False, rank = 0, world_size = 1, log = False, amp = False, hparams = None, use_aim = True, aim_repo = None, aim_run_hash = None, load_strict = True, *args, **kwargs ): self.GAN_params = [args, kwargs] self.GAN = None self.name = name base_dir = Path(base_dir) self.base_dir = base_dir self.results_dir = base_dir / results_dir self.models_dir = base_dir / models_dir self.fid_dir = base_dir / 'fid' / name self.config_path = self.models_dir / name / '.config.json' assert is_power_of_two(image_size), 'image size must be a power of 2 (64, 128, 256, 512, 1024)' assert all(map(is_power_of_two, attn_res_layers)), 'resolution layers of attention must all be powers of 2 (16, 32, 64, 128, 256, 512)' assert not (dual_contrast_loss and disc_output_size > 1), 'discriminator output size cannot be greater than 1 if using dual contrastive loss' self.image_size = image_size self.num_image_tiles = num_image_tiles self.latent_dim = latent_dim self.fmap_max = fmap_max self.transparent = transparent self.greyscale = greyscale assert (int(self.transparent) + int(self.greyscale)) < 2, 'you can only set either transparency or greyscale' self.aug_prob = aug_prob self.aug_types = aug_types self.lr = lr self.optimizer = optimizer self.num_workers = num_workers self.ttur_mult = ttur_mult self.batch_size = batch_size self.gradient_accumulate_every = gradient_accumulate_every self.gp_weight = gp_weight self.evaluate_every = evaluate_every self.save_every = save_every self.steps = 0 self.attn_res_layers = attn_res_layers self.freq_chan_attn = freq_chan_attn self.disc_output_size = disc_output_size 
self.antialias = antialias self.dual_contrast_loss = dual_contrast_loss self.d_loss = 0 self.g_loss = 0 self.last_gp_loss = None self.last_recon_loss = None self.last_fid = None self.init_folders() self.loader = None self.dataset_aug_prob = dataset_aug_prob self.calculate_fid_every = calculate_fid_every self.calculate_fid_num_images = calculate_fid_num_images self.clear_fid_cache = clear_fid_cache self.is_ddp = is_ddp self.is_main = rank == 0 self.rank = rank self.world_size = world_size self.syncbatchnorm = is_ddp self.load_strict = load_strict self.amp = amp self.G_scaler = GradScaler(enabled = self.amp) self.D_scaler = GradScaler(enabled = self.amp) self.run = None self.hparams = hparams if self.is_main and use_aim: try: import aim self.aim = aim except ImportError: print('unable to import aim experiment tracker - please run `pip install aim` first') self.run = self.aim.Run(run_hash=aim_run_hash, repo=aim_repo) self.run['hparams'] = hparams @property def image_extension(self): return 'jpg' if not self.transparent else 'png' @property def checkpoint_num(self): return floor(self.steps // self.save_every) def init_GAN(self): args, kwargs = self.GAN_params # set some global variables before instantiating GAN global norm_class global Blur norm_class = nn.SyncBatchNorm if self.syncbatchnorm else nn.BatchNorm2d Blur = nn.Identity if not self.antialias else Blur # handle bugs when # switching from multi-gpu back to single gpu if self.syncbatchnorm and not self.is_ddp: import torch.distributed as dist os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '12355' dist.init_process_group('nccl', rank=0, world_size=1) # instantiate GAN self.GAN = LightweightGAN( optimizer=self.optimizer, lr = self.lr, latent_dim = self.latent_dim, attn_res_layers = self.attn_res_layers, freq_chan_attn = self.freq_chan_attn, image_size = self.image_size, ttur_mult = self.ttur_mult, fmap_max = self.fmap_max, disc_output_size = self.disc_output_size, transparent = self.transparent, greyscale = self.greyscale, rank = self.rank, *args, **kwargs ) if self.is_ddp: ddp_kwargs = {'device_ids': [self.rank], 'output_device': self.rank, 'find_unused_parameters': True} self.G_ddp = DDP(self.GAN.G, **ddp_kwargs) self.D_ddp = DDP(self.GAN.D, **ddp_kwargs) self.D_aug_ddp = DDP(self.GAN.D_aug, **ddp_kwargs) def write_config(self): self.config_path.write_text(json.dumps(self.config())) def load_config(self): config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text()) self.image_size = config['image_size'] self.transparent = config['transparent'] self.syncbatchnorm = config['syncbatchnorm'] self.disc_output_size = config['disc_output_size'] self.greyscale = config.pop('greyscale', False) self.attn_res_layers = config.pop('attn_res_layers', []) self.freq_chan_attn = config.pop('freq_chan_attn', False) self.optimizer = config.pop('optimizer', 'adam') self.fmap_max = config.pop('fmap_max', 512) del self.GAN self.init_GAN() def config(self): return { 'image_size': self.image_size, 'transparent': self.transparent, 'greyscale': self.greyscale, 'syncbatchnorm': self.syncbatchnorm, 'disc_output_size': self.disc_output_size, 'optimizer': self.optimizer, 'attn_res_layers': self.attn_res_layers, 'freq_chan_attn': self.freq_chan_attn } def set_data_src(self, folder): num_workers = default(self.num_workers, math.ceil(NUM_CORES / self.world_size)) self.dataset = ImageDataset(folder, self.image_size, transparent = self.transparent, greyscale = self.greyscale, aug_prob = self.dataset_aug_prob) 
sampler = DistributedSampler(self.dataset, rank=self.rank, num_replicas=self.world_size, shuffle=True) if self.is_ddp else None dataloader = DataLoader(self.dataset, num_workers = num_workers, batch_size = math.ceil(self.batch_size / self.world_size), sampler = sampler, shuffle = not self.is_ddp, drop_last = True, pin_memory = True) self.loader = cycle(dataloader) # auto set augmentation prob for user if dataset is detected to be low num_samples = len(self.dataset) if not exists(self.aug_prob) and num_samples < 1e5: self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6) print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%') def train(self): assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`' device = torch.device(f'cuda:{self.rank}') if not exists(self.GAN): self.init_GAN() self.GAN.train() total_disc_loss = torch.zeros([], device=device) total_gen_loss = torch.zeros([], device=device) batch_size = math.ceil(self.batch_size / self.world_size) image_size = self.GAN.image_size latent_dim = self.GAN.latent_dim aug_prob = default(self.aug_prob, 0) aug_types = self.aug_types aug_kwargs = {'prob': aug_prob, 'types': aug_types} G = self.GAN.G if not self.is_ddp else self.G_ddp D = self.GAN.D if not self.is_ddp else self.D_ddp D_aug = self.GAN.D_aug if not self.is_ddp else self.D_aug_ddp apply_gradient_penalty = self.steps % 4 == 0 # amp related contexts and functions amp_context = autocast if self.amp else null_context # discriminator loss fn if self.dual_contrast_loss: D_loss_fn = dual_contrastive_loss else: D_loss_fn = hinge_loss # train discriminator self.GAN.D_opt.zero_grad() for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[D_aug, G]): latents = torch.randn(batch_size, latent_dim).cuda(self.rank) image_batch = next(self.loader).cuda(self.rank) image_batch.requires_grad_() with amp_context(): with torch.no_grad(): generated_images = G(latents) fake_output, fake_output_32x32, _ = D_aug(generated_images, detach = True, **aug_kwargs) real_output, real_output_32x32, real_aux_loss = D_aug(image_batch, calc_aux_loss = True, **aug_kwargs) real_output_loss = real_output fake_output_loss = fake_output divergence = D_loss_fn(real_output_loss, fake_output_loss) divergence_32x32 = D_loss_fn(real_output_32x32, fake_output_32x32) disc_loss = divergence + divergence_32x32 aux_loss = real_aux_loss disc_loss = disc_loss + aux_loss if apply_gradient_penalty: outputs = [real_output, real_output_32x32] outputs = list(map(self.D_scaler.scale, outputs)) if self.amp else outputs scaled_gradients = torch_grad(outputs=outputs, inputs=image_batch, grad_outputs=list(map(lambda t: torch.ones(t.size(), device = image_batch.device), outputs)), create_graph=True, retain_graph=True, only_inputs=True)[0] inv_scale = safe_div(1., self.D_scaler.get_scale()) if self.amp else 1. 
if inv_scale != float('inf'): gradients = scaled_gradients * inv_scale with amp_context(): gradients = gradients.reshape(batch_size, -1) gp = self.gp_weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean() if not torch.isnan(gp): disc_loss = disc_loss + gp self.last_gp_loss = gp.clone().detach().item() with amp_context(): disc_loss = disc_loss / self.gradient_accumulate_every disc_loss.register_hook(raise_if_nan) self.D_scaler.scale(disc_loss).backward() total_disc_loss += divergence self.last_recon_loss = aux_loss.item() self.d_loss = float(total_disc_loss.item() / self.gradient_accumulate_every) self.D_scaler.step(self.GAN.D_opt) self.D_scaler.update() # generator loss fn if self.dual_contrast_loss: G_loss_fn = dual_contrastive_loss G_requires_calc_real = True else: G_loss_fn = gen_hinge_loss G_requires_calc_real = False # train generator self.GAN.G_opt.zero_grad() for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[G, D_aug]): latents = torch.randn(batch_size, latent_dim).cuda(self.rank) if G_requires_calc_real: image_batch = next(self.loader).cuda(self.rank) image_batch.requires_grad_() with amp_context(): generated_images = G(latents) fake_output, fake_output_32x32, _ = D_aug(generated_images, **aug_kwargs) real_output, real_output_32x32, _ = D_aug(image_batch, **aug_kwargs) if G_requires_calc_real else (None, None, None) loss = G_loss_fn(fake_output, real_output) loss_32x32 = G_loss_fn(fake_output_32x32, real_output_32x32) gen_loss = loss + loss_32x32 gen_loss = gen_loss / self.gradient_accumulate_every gen_loss.register_hook(raise_if_nan) self.G_scaler.scale(gen_loss).backward() total_gen_loss += loss self.g_loss = float(total_gen_loss.item() / self.gradient_accumulate_every) self.G_scaler.step(self.GAN.G_opt) self.G_scaler.update() # calculate moving averages if self.is_main and self.steps % 10 == 0 and self.steps > 20000: self.GAN.EMA() if self.is_main and self.steps <= 25000 and self.steps % 1000 == 2: self.GAN.reset_parameter_averaging() # save from NaN errors if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)): print(f'NaN detected for generator or discriminator. 
Loading from checkpoint #{self.checkpoint_num}') self.load(self.checkpoint_num) raise NanException del total_disc_loss del total_gen_loss # periodically save results if self.is_main: if self.steps % self.save_every == 0: self.save(self.checkpoint_num) if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 20000): self.evaluate(floor(self.steps / self.evaluate_every), num_image_tiles = self.num_image_tiles) if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0: num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size) fid = self.calculate_fid(num_batches) self.last_fid = fid with open(str(self.results_dir / self.name / f'fid_scores.txt'), 'a') as f: f.write(f'{self.steps},{fid}\n') self.steps += 1 @torch.no_grad() def evaluate(self, num = 0, num_image_tiles = 4): self.GAN.eval() ext = self.image_extension num_rows = num_image_tiles latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size # latents and noise def image_to_pil(image): ndarr = image.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy() im = Image.fromarray(ndarr) return im latents = det_randn((num_rows ** 2, latent_dim)).cuda(self.rank) interpolate_latents = interpolate_between(latents[:num_rows], latents[-num_rows:], num_samples=num_rows, dim=0).flatten(end_dim=1) generate_interpolations = self.generate_(self.GAN.G, interpolate_latents) if self.run is not None: grouped = generate_interpolations.view(num_rows, num_rows, *generate_interpolations.shape[1:]) for idx, images in enumerate(grouped): alpha = idx / (len(grouped) - 1) aim_images = [] for image in images: im = image_to_pil(image) aim_images.append(self.aim.Image(im, caption=f'#{idx}')) self.run.track(value=aim_images, name='generated', step=self.steps, context={'interpolated': True, 'alpha': alpha}) torchvision.utils.save_image(generate_interpolations, str(self.results_dir / self.name / f'{str(num)}-interp.{ext}'), nrow=num_rows) # regular generated_images = self.generate_(self.GAN.G, latents) if self.run is not None: aim_images = [] for idx, image in enumerate(generated_images): im = image_to_pil(image) aim_images.append(self.aim.Image(im, caption=f'#{idx}')) self.run.track(value=aim_images, name='generated', step=self.steps, context={'ema': False}) torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows) # moving averages generated_images = self.generate_(self.GAN.GE, latents) if self.run is not None: aim_images = [] for idx, image in enumerate(generated_images): im = image_to_pil(image) aim_images.append(self.aim.Image(im, caption=f'EMA #{idx}')) self.run.track(value=aim_images, name='generated', step=self.steps, context={'ema': True}) torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows) @torch.no_grad() def generate(self, num=0, num_image_tiles=4, checkpoint=None, types=['default', 'ema']): self.GAN.eval() latent_dim = self.GAN.latent_dim dir_name = self.name + str('-generated-') + str(checkpoint) dir_full = Path().absolute() / self.results_dir / dir_name ext = self.image_extension if not dir_full.exists(): os.mkdir(dir_full) # regular if 'default' in types: for i in tqdm(range(num_image_tiles), desc='Saving generated default images'): latents = torch.randn((1, latent_dim)).cuda(self.rank) generated_image = self.generate_(self.GAN.G, latents) path = str(self.results_dir / dir_name / 
f'{str(num)}-{str(i)}.{ext}') torchvision.utils.save_image(generated_image[0], path, nrow=1) # moving averages if 'ema' in types: for i in tqdm(range(num_image_tiles), desc='Saving generated EMA images'): latents = torch.randn((1, latent_dim)).cuda(self.rank) generated_image = self.generate_(self.GAN.GE, latents) path = str(self.results_dir / dir_name / f'{str(num)}-{str(i)}-ema.{ext}') torchvision.utils.save_image(generated_image[0], path, nrow=1) return dir_full @torch.no_grad() def show_progress(self, num_images=4, types=['default', 'ema']): checkpoints = self.get_checkpoints() assert exists(checkpoints), 'cannot find any checkpoints to create a training progress video for' dir_name = self.name + str('-progress') dir_full = Path().absolute() / self.results_dir / dir_name ext = self.image_extension latents = None zfill_length = math.ceil(math.log10(len(checkpoints))) if not dir_full.exists(): os.mkdir(dir_full) for checkpoint in tqdm(checkpoints, desc='Generating progress images'): self.load(checkpoint, print_version=False) self.GAN.eval() if checkpoint == 0: latents = torch.randn((num_images, self.GAN.latent_dim)).cuda(self.rank) # regular if 'default' in types: generated_image = self.generate_(self.GAN.G, latents) path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}.{ext}') torchvision.utils.save_image(generated_image, path, nrow=num_images) # moving averages if 'ema' in types: generated_image = self.generate_(self.GAN.GE, latents) path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}-ema.{ext}') torchvision.utils.save_image(generated_image, path, nrow=num_images) @torch.no_grad() def calculate_fid(self, num_batches): from pytorch_fid import fid_score torch.cuda.empty_cache() real_path = self.fid_dir / 'real' fake_path = self.fid_dir / 'fake' # remove any existing files used for fid calculation and recreate directories if not real_path.exists() or self.clear_fid_cache: rmtree(real_path, ignore_errors=True) os.makedirs(real_path) for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'): real_batch = next(self.loader) for k, image in enumerate(real_batch.unbind(0)): ind = k + batch_num * self.batch_size torchvision.utils.save_image(image, real_path / f'{ind}.png') # generate a bunch of fake images in results / name / fid_fake rmtree(fake_path, ignore_errors=True) os.makedirs(fake_path) self.GAN.eval() ext = self.image_extension latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'): # latents and noise latents = torch.randn(self.batch_size, latent_dim).cuda(self.rank) # moving averages generated_images = self.generate_(self.GAN.GE, latents) for j, image in enumerate(generated_images.unbind(0)): ind = j + batch_num * self.batch_size torchvision.utils.save_image(image, str(fake_path / f'{str(ind)}-ema.{ext}')) return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, latents.device, 2048) @torch.no_grad() def generate_(self, G, style, num_image_tiles = 8): generated_images = evaluate_in_chunks(self.batch_size, G, style) return generated_images.clamp_(0., 1.) 
@torch.no_grad() def generate_interpolation(self, num = 0, num_image_tiles = 8, num_steps = 100, save_frames = False): self.GAN.eval() ext = self.image_extension num_rows = num_image_tiles latent_dim = self.GAN.latent_dim image_size = self.GAN.image_size # latents and noise latents_low = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank) latents_high = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank) ratios = torch.linspace(0., 8., num_steps) frames = [] for ratio in tqdm(ratios): interp_latents = slerp(ratio, latents_low, latents_high) generated_images = self.generate_(self.GAN.GE, interp_latents) images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows) pil_image = transforms.ToPILImage()(images_grid.cpu()) if self.transparent: background = Image.new('RGBA', pil_image.size, (255, 255, 255)) pil_image = Image.alpha_composite(background, pil_image) frames.append(pil_image) frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True) if save_frames: folder_path = (self.results_dir / self.name / f'{str(num)}') folder_path.mkdir(parents=True, exist_ok=True) for ind, frame in enumerate(frames): frame.save(str(folder_path / f'{str(ind)}.{ext}')) def print_log(self): data = [ ('G', self.g_loss), ('D', self.d_loss), ('GP', self.last_gp_loss), ('SS', self.last_recon_loss), ('FID', self.last_fid) ] data = [d for d in data if exists(d[1])] log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data)) print(log) if self.run is not None: for key, value in data: self.run.track(value, key, step=self.steps) return data def model_name(self, num): return str(self.models_dir / self.name / f'model_{num}.pt') def init_folders(self): (self.results_dir / self.name).mkdir(parents=True, exist_ok=True) (self.models_dir / self.name).mkdir(parents=True, exist_ok=True) def clear(self): rmtree(str(self.models_dir / self.name), True) rmtree(str(self.results_dir / self.name), True) rmtree(str(self.fid_dir), True) rmtree(str(self.config_path), True) self.init_folders() def save(self, num): save_data = { 'GAN': self.GAN.state_dict(), 'version': __version__, 'G_scaler': self.G_scaler.state_dict(), 'D_scaler': self.D_scaler.state_dict() } torch.save(save_data, self.model_name(num)) self.write_config() def load(self, num=-1, print_version=True): self.load_config() name = num if num == -1: checkpoints = self.get_checkpoints() if not exists(checkpoints): return name = checkpoints[-1] print(f'continuing from previous epoch - {name}') self.steps = name * self.save_every load_data = torch.load(self.model_name(name)) if print_version and 'version' in load_data and self.is_main: print(f"loading from version {load_data['version']}") try: self.GAN.load_state_dict(load_data['GAN'], strict = self.load_strict) except Exception as e: saved_version = load_data['version'] print('unable to load save model. please try downgrading the package to the version specified by the saved model (to do so, just run `pip install lightweight-gan=={saved_version}`') raise e if 'G_scaler' in load_data: self.G_scaler.load_state_dict(load_data['G_scaler']) if 'D_scaler' in load_data: self.D_scaler.load_state_dict(load_data['D_scaler']) def get_checkpoints(self): file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')] saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths)) if len(saved_nums) == 0: return None return saved_nums
lightweight-gan-main
lightweight_gan/lightweight_gan.py
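The Trainer keeps an exponentially averaged copy of the generator (GE) via EMA(0.995), reset early in training by reset_parameter_averaging. A CPU-only sketch of that update rule, with toy linear layers standing in for G and GE:

import torch
from torch import nn

online = nn.Linear(4, 4)          # stands in for self.G
averaged = nn.Linear(4, 4)        # stands in for self.GE
averaged.load_state_dict(online.state_dict())    # reset_parameter_averaging

beta = 0.995                      # same decay as EMA(0.995)

@torch.no_grad()
def ema_update(ma_model, current_model, beta):
    # old * beta + (1 - beta) * new, as in EMA.update_average
    for cur_p, ma_p in zip(current_model.parameters(), ma_model.parameters()):
        ma_p.data.mul_(beta).add_(cur_p.data, alpha = 1 - beta)

with torch.no_grad():
    online.weight.add_(0.1)       # pretend an optimizer step changed the online weights
ema_update(averaged, online, beta)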
from setuptools import setup, find_packages

setup(
  name = 'routing_transformer',
  packages = find_packages(exclude=['examples']),
  version = '1.6.1',
  license='MIT',
  description = 'Routing Transformer (Pytorch)',
  author = 'Phil Wang, Aran Komatsuzaki',
  author_email = '[email protected], [email protected]',
  url = 'https://github.com/lucidrains/routing-transformer',
  keywords = ['transformers', 'attention', 'artificial intelligence'],
  install_requires=[
    'einops',
    'local-attention>=1.4.0',
    'mixture-of-experts>=0.2.0',
    'product-key-memory',
    'torch'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
routing-transformer-master
setup.py
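A tiny sanity-check sketch after installing the package defined above (for example, pip install -e . from the repo root); the distribution name comes from the name field, and the two classes are the ones the example scripts below import:

import importlib.metadata

print(importlib.metadata.version('routing_transformer'))   # expected to match version = '1.6.1'

from routing_transformer import RoutingTransformerLM, RoutingTransformerEncDec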
import deepspeed from routing_transformer import RoutingTransformerLM from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper import argparse import random import tqdm import gzip import numpy as np import torch import torch.optim as optim from torch.nn import functional as F from torch.utils.data import DataLoader, Dataset def add_argument(): parser=argparse.ArgumentParser(description='enwik8') parser.add_argument('--with_cuda', default=False, action='store_true', help='use CPU in case there\'s no GPU support') parser.add_argument('--use_ema', default=False, action='store_true', help='whether use exponential moving average') parser.add_argument('-b', '--batch_size', default=32, type=int, help='mini-batch size (default: 32)') parser.add_argument('-e', '--epochs', default=30, type=int, help='number of total epochs (default: 30)') parser.add_argument('--local_rank', type=int, default=-1, help='local rank passed from distributed launcher') parser = deepspeed.add_config_arguments(parser) args = parser.parse_args() return args # constants VALIDATE_EVERY = 100 GENERATE_EVERY = 500 GENERATE_LENGTH = 1024 SEQ_LEN = 4096 # helpers def decode_token(token): return str(chr(max(32, token))) def decode_tokens(tokens): return ''.join(list(map(decode_token, tokens))) # instantiate model model = RoutingTransformerLM( num_tokens = 256, dim = 512, depth = 8, max_seq_len = SEQ_LEN, heads = 8, causal = True, window_size = 128, reversible = True, ff_chunks = 2, attn_dropout = 0.1, rel_pos_emb = False, n_local_attn_heads = (8, 8, 8, 8, 4, 4, 2, 2) ) model = AutoregressiveWrapper(model) model.cuda() # prepare enwik8 data with gzip.open('./data/enwik8.gz') as file: X = np.fromstring(file.read(int(95e6)), dtype=np.uint8) trX, vaX = np.split(X, [int(90e6)]) data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX) class TextSamplerDataset(Dataset): def __init__(self, data, seq_len): super().__init__() self.data = data self.seq_len = seq_len def __getitem__(self, index): rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,)) full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long() return full_seq, torch.ones_like(full_seq).bool() def __len__(self): return self.data.size(0) // self.seq_len train_dataset = TextSamplerDataset(data_train, SEQ_LEN) val_dataset = TextSamplerDataset(data_val, SEQ_LEN) # setup deepspeed cmd_args = add_argument() model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset) # training for i, (data, mask) in enumerate(trainloader): model_engine.train() data = data.to(model_engine.local_rank) loss = model_engine(data, return_loss = True, randomly_truncate_sequence = True) model_engine.backward(loss) model_engine.step() print(loss.item()) if i % VALIDATE_EVERY == 0: model.eval() with torch.no_grad(): inp, _ = random.choice(val_dataset) loss = model(inp[None, :].cuda(), return_loss = True) print(f'validation loss: {loss.item()}') if i != 0 and model_engine.local_rank == 0 and i % GENERATE_EVERY == 0: model.eval() inp, _ = random.choice(val_dataset) print(inp.shape, inp) prime = decode_tokens(inp) print(f'%s \n\n %s', (prime, '*' * 100)) sample = model.generate(inp.cuda(), GENERATE_LENGTH) output_str = decode_tokens(sample) print(output_str)
routing-transformer-master
examples/enwik8_deepspeed/train.py
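Running the script above needs enwik8.gz and a DeepSpeed launcher, but the data handling is easy to preview in isolation. A CPU-only sketch of the byte-level sampling in TextSamplerDataset and the decode_tokens mapping, with random printable bytes standing in for enwik8:

import numpy as np
import torch

X = np.random.randint(32, 127, size = 10_000, dtype = np.uint8)   # placeholder corpus
data = torch.from_numpy(X)
SEQ_LEN = 64

def sample(data, seq_len):
    # mirrors TextSamplerDataset.__getitem__: a random (seq_len + 1)-byte window plus an all-True mask
    rand_start = torch.randint(0, data.size(0) - seq_len - 1, (1,))
    full_seq = data[rand_start: rand_start + seq_len + 1].long()
    return full_seq, torch.ones_like(full_seq).bool()

seq, mask = sample(data, SEQ_LEN)
decoded = ''.join(chr(max(32, t)) for t in seq.tolist())           # same mapping as decode_tokens
print(seq.shape, mask.shape, decoded[:40])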
import torch
import numpy as np
import math
import time
import random

from torch.optim import Adam
from routing_transformer.routing_transformer import RoutingTransformerLM
from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper

s = RoutingTransformerLM(
    num_tokens = 256 + 4,
    dim = 1024,
    depth = 2,
    heads = 8,
    max_seq_len = 256,
    causal = True,
    window_size = 128
).cuda()

s = AutoregressiveWrapper(s, ignore_index = 0, pad_value = 0)
opt = Adam(s.parameters(), lr=1e-4)

N_BATCH = 32
SRC_SEQ_LEN = 128
TGT_SEQ_LEN = 128

bos = 1*torch.ones(N_BATCH, 1).long()
eos = 2*torch.ones(N_BATCH, 1).long()
pos = 3*torch.ones(N_BATCH, 1).long()

for i in range(10000):
    train_seq_in = torch.randint(4, 6, (N_BATCH, SRC_SEQ_LEN - 2)).long()
    train_seq_out = train_seq_in + 1
    train_seq = torch.cat([bos, train_seq_in, pos, pos, pos, train_seq_out, eos], dim=1).cuda()

    loss = s(train_seq, return_loss = True)
    loss.backward()
    opt.step()
    opt.zero_grad()
    print(i, loss.item())
routing-transformer-master
examples/toy_tasks/increment.py
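The toy task packs the source digits, three separator tokens, the incremented targets, and the bos/eos markers into a single autoregressive sequence. A CPU-only sketch of that layout at a tiny size:

import torch

N_BATCH, SRC_SEQ_LEN = 2, 8                     # tiny sizes, just to show the layout

bos = 1 * torch.ones(N_BATCH, 1).long()         # ids 1/2/3 are reserved for bos/eos/separator
eos = 2 * torch.ones(N_BATCH, 1).long()
pos = 3 * torch.ones(N_BATCH, 1).long()

train_seq_in  = torch.randint(4, 6, (N_BATCH, SRC_SEQ_LEN - 2)).long()   # source tokens in {4, 5}
train_seq_out = train_seq_in + 1                                         # the increment the model must learn

train_seq = torch.cat([bos, train_seq_in, pos, pos, pos, train_seq_out, eos], dim = 1)
print(train_seq[0])                             # e.g. tensor([1, 4, 5, ..., 3, 3, 3, 5, 6, ..., 2])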
import tqdm
import torch
import torch.optim as optim

from routing_transformer import RoutingTransformerEncDec

# constants

NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 256 + 2
ENC_SEQ_LEN = 128
DEC_SEQ_LEN = 256

# helpers

def cycle():
    while True:
        prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
        src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
        tgt = torch.cat((prefix, src, src), 1)
        src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool().cuda()
        tgt_mask = torch.ones(BATCH_SIZE, tgt.shape[1]).bool().cuda()
        yield (src, tgt, src_mask, tgt_mask)

# instantiate model

model = RoutingTransformerEncDec(
    dim=512,
    enc_num_tokens=NUM_TOKENS,
    enc_depth=3,
    enc_heads=8,
    enc_max_seq_len=ENC_SEQ_LEN,
    enc_window_size=32,
    dec_num_tokens = NUM_TOKENS,
    dec_depth = 3,
    dec_heads = 8,
    dec_max_seq_len=DEC_SEQ_LEN,
    dec_window_size=32,
).cuda()

# optimizer

optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

# training

for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
    model.train()

    src, tgt, src_mask, tgt_mask = next(cycle())
    loss, _ = model(src, tgt, enc_input_mask=src_mask, dec_input_mask=tgt_mask, return_loss = True, randomly_truncate_sequence = True)
    loss.backward()

    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optim.step()
    optim.zero_grad()

    if i != 0 and i % GENERATE_EVERY == 0:
        model.eval()
        src, _, src_mask, _ = next(cycle())
        src, src_mask = src[0:1], src_mask[0:1]
        start_tokens = (torch.ones((1, 1)) * 1).long().cuda()

        sample = model.generate(src, start_tokens, ENC_SEQ_LEN, enc_input_mask=src_mask)
        incorrects = (src != sample).abs().sum()

        print(f"input: ", src)
        print(f"predicted output: ", sample)
        print(f"incorrects: {incorrects}")
routing-transformer-master
examples/toy_tasks/enc_dec_copy_task.py
from routing_transformer import RoutingTransformerLM
from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper

import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset

# constants

NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY  = 100
GENERATE_EVERY  = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096

# helpers

def cycle(loader):
    while True:
        for data in loader:
            yield data

def decode_token(token):
    return str(chr(max(32, token)))

def decode_tokens(tokens):
    return ''.join(list(map(decode_token, tokens)))

# instantiate model

model = RoutingTransformerLM(
    num_tokens = 256,
    dim = 512,
    depth = 6,
    max_seq_len = SEQ_LEN,
    heads = 8,
    causal = True,
    window_size = 128,
    n_local_attn_heads = (8, 8, 8, 4, 4, 4)
)

model = AutoregressiveWrapper(model)
model.cuda()

# prepare enwik8 data

with gzip.open('./data/enwik8.gz') as file:
    # np.fromstring is deprecated for binary data; frombuffer + copy keeps the array writable for torch.from_numpy
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
    trX, vaX = np.split(X, [int(90e6)])
    data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)

class TextSamplerDataset(Dataset):
    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
        full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
        return full_seq.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len

train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset   = TextSamplerDataset(data_val, SEQ_LEN)
train_loader  = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader    = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))

# optimizer

optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

# training

for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
    model.train()

    for __ in range(GRADIENT_ACCUMULATE_EVERY):
        loss = model(next(train_loader), return_loss = True)
        loss.backward()

    print(f'training loss: {loss.item()}')
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optim.step()
    optim.zero_grad()

    if i % VALIDATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            loss = model(next(val_loader), return_loss = True)
            print(f'validation loss: {loss.item()}')

    if i % GENERATE_EVERY == 0:
        model.eval()
        inp = random.choice(val_dataset)[:-1]
        prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))  # old-style formatting; the f-string/%-tuple mix printed the raw template

        sample = model.generate(inp, GENERATE_LENGTH)
        output_str = decode_tokens(sample)
        print(output_str)
routing-transformer-master
examples/enwik8_simple/train.py
import math
import torch
from torch import nn
from routing_transformer.routing_transformer import RoutingTransformer
import torch.nn.functional as F

def find_module(nn_module, type):
    for module in nn_module.modules():
        if isinstance(module, type):
            return module
    return None

def pad_to_multiple(tensor, multiple, dim=-1, value=0):
    seqlen = tensor.shape[dim]
    m = seqlen / multiple
    if m.is_integer():
        return tensor

    pre_pad_offset = (0,) * (-1 - dim) * 2
    padding = math.ceil(m) * multiple - seqlen
    padded_tensor = F.pad(tensor, (*pre_pad_offset, *(0, padding)), value=value)
    return padded_tensor

class Autopadder(nn.Module):
    def __init__(self, net):
        super().__init__()
        transformer = find_module(net, RoutingTransformer)
        self.net = net
        self.pad_multiple = transformer.pad_to_multiple

    def forward(self, x, **kwargs):
        if self.pad_multiple <= 0:
            return self.net(x, **kwargs)

        b, t, device = *x.shape, x.device

        input_mask = kwargs.get('input_mask')

        if input_mask is None:
            input_mask = torch.full((b, t), True, device=device, dtype=torch.bool)

        x = pad_to_multiple(x, self.pad_multiple, dim=1)
        new_mask = pad_to_multiple(input_mask, self.pad_multiple, dim=1, value=False)
        kwargs.update(input_mask=new_mask)

        out, loss = self.net(x, **kwargs)
        return out[:, 0:t], loss
routing-transformer-master
routing_transformer/autopadder.py
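A minimal sketch of how the Autopadder above behaves on its own, assuming a model small enough to run on CPU. pad_to_multiple is only non-zero when the wrapped transformer uses local attention heads, so the (illustrative) hyperparameters below are chosen to trigger the padding path; the sequence length 100 is deliberately not a multiple of the local window.

# sketch only: exercising Autopadder directly with made-up hyperparameters
import torch
from routing_transformer import RoutingTransformerLM
from routing_transformer.autopadder import Autopadder

lm = RoutingTransformerLM(
    num_tokens = 256,
    dim = 64,
    depth = 1,
    heads = 4,
    max_seq_len = 512,
    window_size = 64,
    local_attn_window_size = 64,
    n_local_attn_heads = 2,   # local attention makes pad_to_multiple = local_attn_window_size
    causal = True
)
padded_lm = Autopadder(lm)

x = torch.randint(0, 256, (1, 100))   # length 100 is not a multiple of 64
logits, aux_loss = padded_lm(x)       # internally padded to 128, then cropped back
print(logits.shape)                   # -> torch.Size([1, 100, 256])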
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

from routing_transformer.routing_transformer import RoutingTransformerLM
from routing_transformer.autopadder import Autopadder

def default(value, default):
    return value if value is not None else default

def top_p(logits, thres = 0.9):
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

    sorted_indices_to_remove = cum_probs > 1.0 - thres
    sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
    sorted_indices_to_remove[:, 0] = 0

    sorted_logits[sorted_indices_to_remove] = float('-inf')
    return sorted_logits.scatter(1, sorted_indices, sorted_logits)

def top_k(logits, thres = 0.9):
    k = int((1 - thres) * logits.shape[-1])
    val, ind = torch.topk(logits, k)
    probs = torch.full_like(logits, float('-inf'))
    probs.scatter_(1, ind, val)
    return probs

def pad_sequence_right(seqs, value):
    m = max([len(s) for s in seqs])
    return torch.stack([F.pad(s, (0, m - len(s))) for s in seqs])

def truncate_sequence(inputs, mask = None, pad_value=0):
    b, t, device, dtype = *inputs.shape, inputs.device, inputs.dtype
    mask = default(mask, torch.ones_like(inputs).bool())
    rand_length = random.randint(2, t)
    return inputs[:, :rand_length], mask[:, :rand_length]

class AutoregressiveWrapper(nn.Module):
    def __init__(self, net, ignore_index = None, pad_value = 0):
        super().__init__()
        assert isinstance(net, RoutingTransformerLM), 'generative trainer wrapper can only accept RoutingTransformerLM class'
        self.pad_value = pad_value
        self.ignore_index = default(ignore_index, pad_value)

        self.net = Autopadder(net)
        self.max_seq_len = net.max_seq_len
        self.base_net = net

    def update_kmeans(self):
        self.base_net.update_kmeans()

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
        was_training = self.net.training
        num_dims = len(start_tokens.shape)

        if num_dims == 1:
            start_tokens = start_tokens[None, :]

        b, t = start_tokens.shape

        self.net.eval()
        out = start_tokens
        input_mask = kwargs.pop('input_mask', None)

        if input_mask is None:
            input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)

        for _ in range(seq_len):
            x = out[:, -self.max_seq_len:]
            input_mask = input_mask[:, -self.max_seq_len:]
            logits, _ = self.net(x, input_mask=input_mask, **kwargs)
            logits = logits[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)

            out = torch.cat((out, sample), dim=-1)
            input_mask = F.pad(input_mask, (1, 0), value=True)

            if eos_token is not None and (sample == eos_token).all():
                break

        out = out[:, t:]

        if num_dims == 1:
            out = out.squeeze(0)

        self.net.train(was_training)
        return out

    def forward(self, x, return_loss = False, randomly_truncate_sequence = False, **kwargs):
        pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)

        if not return_loss:
            if not isinstance(x, torch.Tensor):
                x = pad(x)
            return self.net(x, **kwargs)

        m = kwargs.get('input_mask', None)

        if randomly_truncate_sequence:
            x, m = truncate_sequence(x, m, pad_value = self.pad_value)

        if isinstance(x, torch.Tensor):
            xi, xo = x[:, :-1], x[:, 1:]
        else:
            xi = pad(list(map(lambda t: t[:-1], x)))
            xo = pad(list(map(lambda t: t[1:], x)))

        if m is not None:
            assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
            kwargs['input_mask'] = m[:, :-1]

        out, aux_loss = self.net(xi, **kwargs)

        loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
        loss = loss + aux_loss
        return loss
routing-transformer-master
routing_transformer/autoregressive_wrapper.py
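A usage sketch for the wrapper above, with illustrative hyperparameters: training goes through forward with return_loss=True, which shifts the sequence internally and adds the routing commitment loss; sampling goes through generate, here with the top_p filter defined in the same file.

# sketch only: one training step and one sampling call through AutoregressiveWrapper
import torch
from routing_transformer import RoutingTransformerLM
from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper, top_p

lm = RoutingTransformerLM(num_tokens = 256, dim = 64, depth = 1, heads = 4,
                          max_seq_len = 256, window_size = 64, causal = True)
model = AutoregressiveWrapper(lm, pad_value = 0)

seq = torch.randint(0, 256, (2, 256))
loss = model(seq, return_loss = True)   # next-token loss + k-means commitment loss
loss.backward()

prime = torch.randint(0, 256, (1, 32))
sample = model.generate(prime, 64, filter_logits_fn = top_p, filter_thres = 0.9, temperature = 1.)
print(sample.shape)                     # -> torch.Size([1, 64])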
import torch import torch.nn as nn from operator import itemgetter from torch.autograd.function import Function from torch.utils.checkpoint import get_device_states, set_device_states # for routing arguments into the functions of the reversible layer def route_args(router, args, depth): routed_args = [(dict(), dict()) for _ in range(depth)] matched_keys = [key for key in args.keys() if key in router] for key in matched_keys: val = args[key] for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])): new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes) routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args}) return routed_args def layer_drop(layers, prob): to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob blocks = [block for block, drop in zip(layers, to_drop) if not drop] blocks = layers[:1] if len(blocks) == 0 else blocks return blocks def cast_return(ret, requires_grad = True): if type(ret) is not tuple: loss = torch.tensor(0., device=ret.device, dtype=ret.dtype, requires_grad=requires_grad) return (ret, loss) return ret # following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html class Deterministic(nn.Module): def __init__(self, net): super().__init__() self.net = net self.cpu_state = None self.cuda_in_fwd = None self.gpu_devices = None self.gpu_states = None def record_rng(self, *args): self.cpu_state = torch.get_rng_state() if torch.cuda._initialized: self.cuda_in_fwd = True self.gpu_devices, self.gpu_states = get_device_states(*args) def forward(self, *args, record_rng = False, set_rng = False, **kwargs): if record_rng: self.record_rng(*args) if not set_rng: return self.net(*args, **kwargs) rng_devices = [] if self.cuda_in_fwd: rng_devices = self.gpu_devices with torch.random.fork_rng(devices=rng_devices, enabled=True): torch.set_rng_state(self.cpu_state) if self.cuda_in_fwd: set_device_states(self.gpu_devices, self.gpu_states) return self.net(*args, **kwargs) # heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py # once multi-GPU is confirmed working, refactor and send PR back to source class ReversibleBlock(nn.Module): def __init__(self, f, g): super().__init__() self.f = Deterministic(f) self.g = Deterministic(g) def forward(self, x, f_args = {}, g_args = {}): x1, x2 = torch.chunk(x, 2, dim=2) y1, y2 = None, None f_args['_reverse'] = g_args['_reverse'] = False with torch.no_grad(): f_out, f_loss = cast_return(self.f(x2, record_rng=self.training, **f_args), requires_grad = False) y1 = x1 + f_out g_out, g_loss = cast_return(self.g(y1, record_rng=self.training, **g_args), requires_grad = False) y2 = x2 + g_out return torch.cat([y1, y2], dim=2), f_loss, g_loss def backward_pass(self, y, dy, dl_f, dl_g, f_args = {}, g_args = {}): y1, y2 = torch.chunk(y, 2, dim=2) del y dy1, dy2 = torch.chunk(dy, 2, dim=2) del dy f_args['_reverse'] = g_args['_reverse'] = True with torch.enable_grad(): y1.requires_grad = True gy1, g_loss = cast_return(self.g(y1, set_rng=True, **g_args)) torch.autograd.backward((gy1, g_loss), (dy2, dl_g)) with torch.no_grad(): x2 = y2 - gy1 del y2, gy1 dx1 = dy1 + y1.grad del dy1 y1.grad = None with torch.enable_grad(): x2.requires_grad = True fx2, f_loss = cast_return(self.f(x2, set_rng=True, **f_args)) torch.autograd.backward((fx2, f_loss), (dx1, dl_f), retain_graph=True) with torch.no_grad(): x1 = y1 - fx2 del y1, fx2 dx2 = dy2 + x2.grad del dy2 x2.grad = None x = torch.cat([x1, x2.detach()], 
dim=2) dx = torch.cat([dx1, dx2], dim=2) return x, dx class _ReversibleFunction(Function): @staticmethod def forward(ctx, x, blocks, args): ctx.args = args f_aux_loss = [] g_aux_loss = [] for block, kwarg in zip(blocks, args): x, f_loss, g_loss = block(x, **kwarg) f_aux_loss.append(f_loss) g_aux_loss.append(g_loss) ctx.y = x.detach() ctx.blocks = blocks return x, torch.stack(f_aux_loss), torch.stack(g_aux_loss) @staticmethod def backward(ctx, dy, dl_f, dl_g): y = ctx.y args = ctx.args for block, kwargs, ind in zip(ctx.blocks[::-1], args[::-1], range(len(ctx.blocks))[::-1]): y, dy = block.backward_pass(y, dy, dl_f[ind], dl_g[ind], **kwargs) return dy, None, None class SequentialSequence(nn.Module): def __init__(self, layers, args_route = {}, layer_dropout = 0.): super().__init__() assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers' self.layers = layers self.args_route = args_route self.layer_dropout = layer_dropout def forward(self, x, **kwargs): args = route_args(self.args_route, kwargs, len(self.layers)) layers_and_args = list(zip(self.layers, args)) if self.training and self.layer_dropout > 0: layers_and_args = layer_drop(layers_and_args, self.layer_dropout) aux_loss = torch.zeros(1, device=x.device, dtype=x.dtype) for (f, g), (f_args, g_args) in layers_and_args: res, loss = cast_return(f(x, **f_args)) aux_loss += loss x = x + res res, loss = cast_return(g(x, **g_args)) aux_loss += loss x = x + res return x, aux_loss class ReversibleSequence(nn.Module): def __init__(self, blocks, args_route = {}, layer_dropout = 0.): super().__init__() self.args_route = args_route self.layer_dropout = layer_dropout self.blocks = nn.ModuleList([ReversibleBlock(f, g) for f, g in blocks]) def forward(self, x, **kwargs): x = torch.cat([x, x], dim=-1) blocks = self.blocks args = route_args(self.args_route, kwargs, len(blocks)) args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args)) layers_and_args = list(zip(blocks, args)) if self.training and self.layer_dropout > 0: layers_and_args = layer_drop(layers_and_args, self.layer_dropout) blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1)) out, f_loss, g_loss = _ReversibleFunction.apply(x, blocks, args) out = torch.stack(out.chunk(2, dim=-1)).mean(dim=0) aux_loss = f_loss.sum() + g_loss.sum() return out, aux_loss
routing-transformer-master
routing_transformer/reversible.py
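The reversible machinery above is normally driven indirectly by constructing the transformer with reversible=True (as the DeepSpeed example earlier does). A small sketch with made-up sizes, showing that both the logits and the auxiliary routing loss still flow through _ReversibleFunction's custom backward:

# sketch only: ReversibleSequence exercised via the `reversible` flag
import torch
from routing_transformer import RoutingTransformerLM

lm = RoutingTransformerLM(num_tokens = 256, dim = 64, depth = 2, heads = 4,
                          max_seq_len = 256, window_size = 64, causal = True,
                          reversible = True)

x = torch.randint(0, 256, (1, 256))
logits, aux_loss = lm(x)              # aux_loss gathers the commitment losses from f and g blocks
(logits.sum() + aux_loss).backward()  # gradients are recomputed block by block in backward_pass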
import re from inspect import isfunction import torch from torch import nn from routing_transformer.routing_transformer import RoutingTransformerLM, update_kmeans_on_backwards from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper ENC_PREFIX = 'enc_' DEC_PREFIX = 'dec_' def default(x, d): if x is None: return d if not isfunction(d) else d() return x def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return bool(re.match(f'^{prefix}', str)) def group_by_key_prefix(prefix, d): return group_dict_by_key(lambda x: string_begins_with(prefix, x), d) def group_by_key_prefix_and_remove_prefix(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: string_begins_with(prefix, x), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs def extract_enc_dec_kwargs(kwargs): enc_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs) dec_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, kwargs) return enc_kwargs, dec_kwargs, kwargs def extract_and_set_enc_dec_kwargs(kwargs): enc_kwargs, dec_kwargs, kwargs = extract_enc_dec_kwargs(kwargs) if 'input_mask' in enc_kwargs: dec_kwargs.setdefault('context_mask', enc_kwargs['input_mask']) return enc_kwargs, dec_kwargs, kwargs class RoutingTransformerEncDec(nn.Module): def __init__(self, dim, ignore_index = None, pad_value = 0, **kwargs): super().__init__() ignore_index = default(ignore_index, pad_value) enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs) assert 'return_embedding' not in enc_kwargs, 'you cannot manually set the return embeddings flag for the encoder' assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder' enc_kwargs['dim'] = dec_kwargs['dim'] = dim enc_kwargs['return_embeddings'] = True dec_kwargs['causal'] = True dec_kwargs['receives_context'] = True enc_kwargs['_register_kmeans_update'] = dec_kwargs['_register_kmeans_update'] = False enc_kwargs.setdefault('window_size', 256) dec_kwargs.setdefault('window_size', 256) enc = RoutingTransformerLM(**enc_kwargs) dec = RoutingTransformerLM(**dec_kwargs) self.enc = enc self.dec = AutoregressiveWrapper(dec, ignore_index = ignore_index, pad_value = pad_value) # user will have to manually call backwards on encoder auxiliary loss if the decoder reversibility is turned on # should place a bug bounty on this self.dec_reversible = dec_kwargs.pop('reversible', False) # display a warning message if self.dec_reversible: print('Warning! 
Due to an issue with reversible nets and encoder auxiliary losses, you must explicitly call backwards on the encoder auxiliary loss, which is supplied as the second element of the returned tuple on forward') self._handle = None self.register_kmeans_update() def cancel_kmeans_update(self): if self._handle is None: return self._handle.remove() self._handle = None def register_kmeans_update(self): self.cancel_kmeans_update() return update_kmeans_on_backwards(self) @torch.no_grad() def generate(self, seq_in, seq_out_start, max_seq_len = None, **kwargs): max_seq_len = default(max_seq_len, self.dec.max_seq_len) enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs) context, _ = self.enc(seq_in, **enc_kwargs) return self.dec.generate(seq_out_start, max_seq_len, context = context, **{**dec_kwargs, **kwargs}) def forward(self, seq_in, seq_out, return_loss = False, randomly_truncate_sequence = False, **kwargs): enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs) context, enc_aux_loss = self.enc(seq_in, **enc_kwargs) loss = self.dec(seq_out, return_loss = return_loss, randomly_truncate_sequence = randomly_truncate_sequence, context = context, aux_loss = enc_aux_loss, **dec_kwargs) # if decoder reversibility turned on, user must manually call backward on encoder auxiliary losses if self.dec_reversible: return loss, enc_aux_loss aux_loss = torch.tensor(0., requires_grad = True) loss = loss + enc_aux_loss return loss, aux_loss
routing-transformer-master
routing_transformer/encoder_decoder.py
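A hedged sketch of the encoder/decoder wrapper above, with illustrative sizes: enc_/dec_ prefixed keyword arguments are split by extract_enc_dec_kwargs, forward returns a (loss, aux_loss) pair, and generate decodes from a start token given the encoder context.

# sketch only: tiny RoutingTransformerEncDec, one training step and one generation call
import torch
from routing_transformer import RoutingTransformerEncDec

enc_dec = RoutingTransformerEncDec(
    dim = 64,
    enc_num_tokens = 256, enc_depth = 1, enc_heads = 4, enc_max_seq_len = 128, enc_window_size = 64,
    dec_num_tokens = 256, dec_depth = 1, dec_heads = 4, dec_max_seq_len = 256, dec_window_size = 64,
)

src = torch.randint(0, 256, (1, 128))
tgt = torch.randint(0, 256, (1, 256))

loss, aux_loss = enc_dec(src, tgt, return_loss = True)
(loss + aux_loss).backward()

start = torch.full((1, 1), 1).long()
out = enc_dec.generate(src, start, max_seq_len = 32)   # (1, 32) sampled token ids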
from routing_transformer.routing_transformer import RoutingTransformer, RoutingTransformerLM, KmeansAttention, update_kmeans_on_backwards
from routing_transformer.encoder_decoder import RoutingTransformerEncDec
from routing_transformer.autoregressive_wrapper import AutoregressiveWrapper
from routing_transformer.autopadder import Autopadder
routing-transformer-master
routing_transformer/__init__.py
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import partial, reduce, wraps from einops import rearrange, repeat from einops.layers.torch import Rearrange from local_attention import LocalAttention from product_key_memory import PKM from mixture_of_experts import MoE from routing_transformer.reversible import ReversibleSequence, SequentialSequence # constants TOKEN_SELF_ATTN_VALUE = -5e4 KMEAN_INIT_ITERS = 10 # helper functions def exists(val): return val is not None def identity(x, *args, **kwargs): return x def default(x, d): if not exists(x): return d if not isfunction(d) else d() return x def cast_tuple(x): return x if isinstance(x, tuple) else (x,) def cache_fn(f): cache = None @wraps(f) def cached_fn(*args, **kwargs): nonlocal cache if exists(cache): return cache cache = f(*args, **kwargs) return cache return cached_fn def compose(*fns): def inner(x, *args, **kwargs): for fn in reversed(fns): x = fn(x, *args, **kwargs) return x return inner def to(t): return {'device': t.device, 'dtype': t.dtype} def find_modules(nn_module, type): return [module for module in nn_module.modules() if isinstance(module, type)] def is_empty(t): return t.nelement() == 0 def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def batched_index_select(values, indices): last_dim = values.shape[-1] return values.gather(2, expand_dim(indices, -1, last_dim)) def merge_dims(ind_from, ind_to, tensor): shape = list(tensor.shape) arr_slice = slice(ind_from, ind_to + 1) shape[arr_slice] = [reduce(mul, shape[arr_slice])] return tensor.reshape(*shape) def expand_dim(t, dim, k): t = t.unsqueeze(dim) expand_shape = [-1] * len(t.shape) expand_shape[dim] = k return t.expand(*expand_shape) def scatter_mean(src, t, index, dim, eps = 1e-5): numer = src.scatter_add(dim, index, t) denom = src.scatter_add(dim, index, torch.ones_like(t)) return numer / (denom + eps) def split_at_index(dim, index, t): pre_slices = (slice(None),) * dim l = (*pre_slices, slice(None, index)) r = (*pre_slices, slice(index, None)) return t[l], t[r] def reshape_dim(t, dim, split_dims): shape = list(t.shape) num_dims = len(shape) dim = (dim + num_dims) % num_dims shape[dim:dim+1] = split_dims return t.reshape(shape) def ema(old, new, decay): if not exists(old): return new return old * decay + new * (1 - decay) def ema_inplace(moving_avg, new, decay): if is_empty(moving_avg): moving_avg.data.copy_(new) return moving_avg.data.mul_(decay).add_(new, alpha= (1 - decay)) # helper classes def map_first_tuple_or_el(x, fn): if isinstance(x, tuple): return (fn(x[0]),) + x[1:] return fn(x) class Chunk(nn.Module): def __init__(self, chunks, fn, along_dim = -1): super().__init__() self.dim = along_dim self.chunks = chunks self.fn = fn def forward(self, x, **kwargs): if self.chunks <= 1: return self.fn(x, **kwargs) chunks = x.chunk(self.chunks, dim = self.dim) return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim) class PreNorm(nn.ModuleList): def __init__(self, norm_class, dim, fn): super().__init__() self.norm = norm_class(dim) self.fn = fn def forward(self, x, **kwargs): x = self.norm(x) return self.fn(x, **kwargs) class ReZero(nn.Module): def __init__(self, fn): super().__init__() self.residual_weight = nn.Parameter(torch.zeros(1)) self.fn = fn def forward(self, x, **kwargs): x = self.fn(x, **kwargs) return map_first_tuple_or_el(x, lambda t: t * self.residual_weight) class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-5): 
super().__init__() self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): def norm(t): n = torch.norm(t, dim=-1, keepdim=True).clamp(min=self.eps) return t / n * self.g return map_first_tuple_or_el(x, norm) class ProjectInOut(nn.Module): def __init__(self, fn, dim_in, dim_out, project_out = True): super().__init__() self.fn = fn self.project_in = nn.Linear(dim_in, dim_out) self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity def forward(self, x, **kwargs): x = self.project_in(x) x, loss = self.fn(x, **kwargs) x = self.project_out(x) return x, loss class MatrixMultiply(nn.Module): def __init__(self, tensor, transpose = False): super().__init__() self.tensor = tensor self.transpose = transpose def forward(self, x): tensor = self.tensor if self.transpose: tensor = tensor.t() return x @ tensor # token shift def shift(t, amount, mask = None): if amount == 0: return t if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return F.pad(t, (0, 0, amount, -amount), value = 0.) class PreShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # positional embeddings class FixedPositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) position = torch.arange(0, max_seq_len, dtype=torch.float) sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) self.register_buffer('emb', emb) def forward(self, x): return self.emb[None, :x.shape[1], :].to(x) def rotate_every_two(x): x = rearrange(x, '... (d j) -> ... d j', j = 2) x1, x2 = x.unbind(dim = -1) x = torch.stack((-x2, x1), dim = -1) return rearrange(x, '... d j -> ... 
(d j)') def apply_rotary_pos_emb(q, k, v, sinu_pos): sinu_pos = sinu_pos.type(q.dtype) sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2) sin, cos = sinu_pos.unbind(dim = -2) sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos)) q, k, v = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k, v)) return q, k, v # kmeans related function and class def update_kmeans_on_backwards(module): module.kmean_modules = find_modules(module, Kmeans) def hook(_, grad_in, grad_out): for m in module.kmean_modules: m.update() return module.register_backward_hook(hook) def similarity(x, means): return torch.einsum('bhld,hcd->bhlc', x, means) def dists_and_buckets(x, means): dists = similarity(x, means) _, buckets = torch.max(dists, dim=-1) return dists, buckets def batched_bincount(index, num_classes, dim=-1): shape = list(index.shape) shape[dim] = num_classes out = index.new_zeros(shape) out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype)) return out def kmeans_iter(x, means, buckets = None): b, h, l, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1] if not exists(buckets): _, buckets = dists_and_buckets(x, means) bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True) zero_mask = bins.long() == 0 means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype) means_.scatter_add_(-2, expand_dim(buckets, -1, d), x) means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype) means = torch.where(zero_mask.unsqueeze(-1), means, means_) means = means.squeeze(0) return means def distribution(dists, window_size): _, topk_indices = dists.topk(k=window_size, dim=-2) indices = topk_indices.transpose(-2, -1) return indices.reshape(*indices.size()[:2], -1) class Kmeans(nn.Module): def __init__(self, num_heads, head_dim, num_clusters, ema_decay = 0.999, commitment = 1e-4): super().__init__() self.commitment = commitment self.ema_decay = ema_decay self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim)) self.register_buffer('initted', torch.tensor(False)) self.num_new_means = 0 self.new_means = None @torch.no_grad() def init(self, x): if self.initted: return _, h, _, d, device, dtype = *x.shape, x.device, x.dtype num_clusters = self.means.shape[1] means = x.transpose(0, 1).contiguous().view(h, -1, d) num_samples = means.shape[1] if num_samples >= num_clusters: indices = torch.randperm(num_samples, device=device)[:num_clusters] else: indices = torch.randint(0, num_samples, (num_clusters,), device=device) means = means[:, indices] for _ in range(KMEAN_INIT_ITERS): means = kmeans_iter(x, means) self.num_new_means = 0 self.means.data.copy_(means) self.initted.data.copy_(torch.tensor(True)) @torch.no_grad() def update(self, new_means = None): new_means = default(new_means, self.new_means) assert exists(new_means), 'new kmeans has not been supplied' ema_inplace(self.means, new_means, self.ema_decay) del self.new_means self.new_means = None self.num_new_means = 0 def forward(self, x, update_means = False): self.init(x) b, dtype = x.shape[0], x.dtype means = self.means.type(dtype) x = F.normalize(x, 2, dim=-1).type(dtype) with torch.no_grad(): dists, buckets = dists_and_buckets(x, means) routed_means = batched_index_select(expand_dim(means, 0, b), buckets) loss = F.mse_loss(x, routed_means) * self.commitment if update_means: with torch.no_grad(): means = kmeans_iter(x, means, buckets) self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1)) self.num_new_means += 1 return dists, loss # 
kmeans attention class class KmeansAttention(nn.Module): def __init__(self, num_clusters, window_size, num_heads, head_dim, causal = False, dropout = 0., ema_decay = 0.999, commitment = 1e-4, context_window_size = None, receives_context = False, num_mem_kv = 0, shared_qk = False): super().__init__() self.num_heads = num_heads self.num_clusters = num_clusters self.head_dim = head_dim self.window_size = window_size self.context_window_size = default(context_window_size, window_size) self.causal = causal self.shared_qk = shared_qk self.receives_context = receives_context self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment) self.dropout = nn.Dropout(dropout) self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0) self.mem_key = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim)) self.mem_value = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim)) def forward(self, q, k, v, query_mask = None, key_mask = None, **kwargs): b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype is_reverse = kwargs.pop('_reverse', False) out = torch.zeros_like(q, dtype=dtype) update_kmeans = self.training and not is_reverse key_mask = default(key_mask, query_mask) if not self.receives_context else key_mask kv_wsz = wsz if not self.receives_context else c_wsz wsz = min(wsz, t) kv_wsz = min(kv_wsz, kv_t) if not self.shared_qk or self.receives_context: dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans) q_dists, k_dists = split_at_index(2, t, dists) indices = distribution(q_dists, wsz) kv_indices = distribution(k_dists, kv_wsz) else: dists, aux_loss = self.kmeans(q, update_kmeans) k = F.normalize(k, dim=-1).to(q) indices = distribution(dists, wsz) kv_indices = indices q = batched_index_select(q, indices) k = batched_index_select(k, kv_indices) v = batched_index_select(v, kv_indices) reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d) q, k, v = map(reshape_with_window, (q, k, v)) m_k, m_v = map(lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value)) k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v))) dots = torch.einsum('bhnid,bhnjd->bhnij', q, k) * (d ** -0.5) mask_value = max_neg_value(dots) if exists(query_mask) or exists(key_mask): query_mask = default(query_mask, lambda: torch.ones((b, t), device=device).bool()) key_mask = default(key_mask, lambda: torch.ones((b, kv_t), device=device).bool()) q_mask = expand_dim(query_mask, 1, h).gather(2, indices) kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices) q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask)) mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=True) dots.masked_fill_(~mask, mask_value) del mask if self.causal: q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices)) mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=True) dots.masked_fill_(~mask, mask_value) del mask if self.shared_qk: q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices)) mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=False) dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE) del mask dots = dots.softmax(dim=-1) dots = self.dropout(dots) bo = torch.einsum('bhcij,bhcjd->bhcid', dots, v) so = torch.reshape(bo, 
(b, h, -1, bo.shape[-1])).type(dtype) out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2) return out, aux_loss # feedforward class GELU_(nn.Module): def forward(self, x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_ class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False): super().__init__() activation = default(activation, GELU) self.glu = glu self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1)) self.act = activation() self.dropout = nn.Dropout(dropout) self.w2 = nn.Linear(dim * mult, dim) def forward(self, x, **kwargs): if not self.glu: x = self.w1(x) x = self.act(x) else: x, v = self.w1(x).chunk(2, dim=-1) x = self.act(x) * v x = self.dropout(x) x = self.w2(x) return x # self attention class SelfAttention(nn.Module): def __init__(self, dim, depth, max_seq_len, heads, local_attn_heads, window_size, dim_head = None, local_attn_window_size = None, local_attn_radius_blocks = 1, causal = False, attn_dropout = 0., dropout = 0., kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, num_mem_kv = 0, shared_qk = False, conv_query_kernel = 9): super().__init__() assert dim_head or (dim % heads) == 0, 'hidden dimension must be divisible by number of heads' assert (max_seq_len % window_size) == 0, 'maximum sequence length must be divisible by the target window size' assert local_attn_heads <= heads, 'number of local attention heads must be less than total heads' assert not (receives_context and local_attn_heads > 0), 'local attention cannot be used for self attention with context' assert not (receives_context and causal), 'contextual attention layer cannot be causal' local_attn_window_size = default(local_attn_window_size, window_size) context_window_size = default(context_window_size, window_size) self.shared_qk = shared_qk self.receives_context = receives_context self.heads = heads self.local_attn_heads = local_attn_heads self.global_attn_heads = heads - local_attn_heads self.causal = causal self.window_size = window_size dim_head = default(dim_head, dim // heads) dim_heads = dim_head * heads self.dim_head = dim_head num_clusters = max_seq_len // window_size # local local_dim_heads = dim_head * self.local_attn_heads if self.local_attn_heads > 0: rel_pos_emb_config = (dim_head, local_attn_heads) if rel_pos_emb else None self.local_attn = LocalAttention(dim = dim_head, window_size = local_attn_window_size, causal = causal, dropout = attn_dropout, rel_pos_emb_config = rel_pos_emb_config, look_backward = local_attn_radius_blocks, look_forward = 0 if causal else local_attn_radius_blocks) self.local_to_qkv = nn.Linear(dim, 3 * local_dim_heads) # global global_dim_heads = dim_head * self.global_attn_heads if self.global_attn_heads > 0: self.global_attn = KmeansAttention(num_clusters, window_size, self.global_attn_heads, dim_head, causal = causal, dropout = attn_dropout, ema_decay = kmeans_ema_decay, commitment = commitment_factor, receives_context = receives_context, num_mem_kv = num_mem_kv, shared_qk = shared_qk) self.to_q = nn.Linear(dim, global_dim_heads, bias = False) self.to_v = nn.Linear(dim, global_dim_heads, bias = False) if not self.shared_qk: self.to_k = nn.Linear(dim, global_dim_heads, bias = False) # out self.to_out = nn.Linear(dim_heads, dim, bias = False) self.dropout = nn.Dropout(dropout) def forward(self, x, context = None, input_mask = None, 
context_mask = None, pos_emb = None, **kwargs): assert not (self.receives_context and not exists(context)), 'context must be passed if self attention is set to receive context' b, t, e, h, dh = *x.shape, self.heads, self.dim_head has_local, has_global = map(lambda x: x > 0, (self.local_attn_heads, self.global_attn_heads)) split_heads = lambda v: reshape_dim(v, -1, (-1, dh)).transpose(1, 2).contiguous() if has_local: local_qkv = self.local_to_qkv(x).chunk(3, dim=-1) lq, lk, lv = map(split_heads, local_qkv) if has_global: kv_input = x if not self.receives_context else context q, v = self.to_q(x), self.to_v(kv_input) if not self.shared_qk: k = self.to_k(kv_input) else: k = self.to_q(kv_input) if self.receives_context else q q, k, v = map(split_heads, (q, k, v)) out = [] total_loss = torch.tensor(0., requires_grad=True, **to(x)) if has_local: local_out = self.local_attn(lq, lk, lv, input_mask = input_mask) out.append(local_out) if has_global: if not self.receives_context and exists(pos_emb): q, k, v = apply_rotary_pos_emb(q, k, v, pos_emb) global_out, loss = self.global_attn(q, k, v, query_mask = input_mask, key_mask = context_mask) total_loss = total_loss + loss out.append(global_out) out = torch.cat(out, dim=1) out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1) out = self.to_out(out) return self.dropout(out), total_loss class RoutingTransformer(nn.Module): def __init__( self, dim, depth, max_seq_len, heads = 8, dim_head = None, window_size = 64, local_attn_window_size = 256, local_attn_radius_blocks = 1, causal = False, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., n_local_attn_heads = 0, ff_glu = False, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, _register_kmeans_update = False, rel_pos_emb = True, pkm_layers = tuple(), pkm_num_keys = 128, moe_layers = tuple(), moe_num_experts = 4, moe_loss_coef = 1e-2, num_mem_kv = 0, shared_qk = None, context_shared_qk = False, use_rezero = False, use_scale_norm = False, ff_activation = None, shift_tokens = False ): super().__init__() shared_qk = default(shared_qk, causal) # default to shared qk when causal, due to experimental results if type(n_local_attn_heads) is not tuple: n_local_attn_heads = tuple([n_local_attn_heads] * depth) assert len(n_local_attn_heads) == depth, 'local attention heads tuple must have the same length as the depth' assert all([(local_heads <= heads) for local_heads in n_local_attn_heads]), 'number of local attn heads must be less than the maximum number of heads' layers = nn.ModuleList([]) norm_type = ScaleNorm if use_scale_norm else nn.LayerNorm fn_wrapper = partial(ReZero) if use_rezero else partial(PreNorm, norm_type, dim) if shift_tokens: shifts = (-1, 0, 1) if not causal else (0, 1) fn_wrapper = compose(fn_wrapper, partial(PreShiftTokens, shifts)) get_attn = lambda local_heads: SelfAttention(dim, depth, max_seq_len, heads, local_heads, window_size, causal = causal, dim_head = dim_head, local_attn_window_size = local_attn_window_size, local_attn_radius_blocks = local_attn_radius_blocks, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, rel_pos_emb = rel_pos_emb, num_mem_kv = num_mem_kv, shared_qk = shared_qk) get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu, activation = ff_activation), along_dim=1) get_context_attn = lambda: 
SelfAttention(dim, depth, max_seq_len, heads, 0, window_size, dim_head = dim_head, local_attn_window_size = local_attn_window_size, local_attn_radius_blocks = local_attn_radius_blocks, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, receives_context = True, context_window_size = context_window_size, num_mem_kv = num_mem_kv, shared_qk = context_shared_qk) get_context_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu, activation = ff_activation), along_dim=1) get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys) get_moe = lambda: MoE(dim, num_experts = moe_num_experts, loss_coef = moe_loss_coef) if weight_tie: assert len(set(n_local_attn_heads)) == 1, 'you can only weight tie if number of local attention heads for all layers is the same' get_attn, get_ff, get_context_attn, get_context_ff, get_pkm, get_moe = map(cache_fn, (get_attn, get_ff, get_context_attn, get_context_ff, get_pkm, get_moe)) for ind, local_heads in zip(range(depth), n_local_attn_heads): layer = ind + 1 use_pkm = layer in cast_tuple(pkm_layers) use_moe = layer in cast_tuple(moe_layers) get_parallel_fn = get_pkm if use_pkm else get_ff get_parallel_fn = get_moe if use_moe else get_parallel_fn layers.append(nn.ModuleList([ fn_wrapper(get_attn(local_heads)), fn_wrapper(get_parallel_fn()) ])) if not receives_context: continue layers.append(nn.ModuleList([ fn_wrapper(get_context_attn()), fn_wrapper(get_context_ff()) ])) execute_type = ReversibleSequence if reversible else SequentialSequence attn_context_layer = ((True, False),) if receives_context else tuple() route_attn = ((True, False), *attn_context_layer) * depth route_context = ((False, False), *attn_context_layer) * depth context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {} attn_route_map = {'input_mask': route_attn, 'pos_emb': route_attn} self.layers = execute_type(layers, args_route = {**attn_route_map, **context_route_map}, layer_dropout = layer_dropout) self._handle = None if _register_kmeans_update: self.register_kmeans_update() has_local_attn = any([num > 0 for num in n_local_attn_heads]) local_attn_window_size = default(local_attn_window_size, window_size) self.pad_to_multiple = local_attn_window_size if has_local_attn else 0 def cancel_kmeans_update(self): if not exists(self._handle): return self._handle.remove() self._handle = None def register_kmeans_update(self): self._handle = update_kmeans_on_backwards(self) def forward(self, x, **kwargs): x, loss = self.layers(x, **kwargs) return x, loss class RoutingTransformerLM(nn.Module): def __init__( self, num_tokens, dim, depth, max_seq_len, heads = 8, dim_head = 64, window_size = 64, local_attn_window_size = None, local_attn_radius_blocks = 1, causal = False, emb_dim = None, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, return_embeddings = False, n_local_attn_heads = 0, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, _register_kmeans_update = True, pkm_layers = tuple(), pkm_num_keys = 128, moe_layers = tuple(), moe_num_experts = 4, moe_loss_coef = 1e-2, num_mem_kv = 0, shared_qk = None, context_shared_qk = False, use_rezero = False, use_scale_norm = False, tie_embedding = False, use_absolute_pos_emb = False, shift_tokens = False ): 
super().__init__() assert (max_seq_len % window_size) == 0, 'max sequence length must be divisible by the window size, to calculate number of kmeans cluster' emb_dim = default(emb_dim, dim) self.max_seq_len = max_seq_len self.sinu_pos_emb = FixedPositionalEmbedding(dim_head, max_seq_len) self.token_emb = nn.Embedding(num_tokens, emb_dim) nn.init.normal_(self.token_emb.weight, std = 0.02) self.routing_transformer = RoutingTransformer(dim, depth, max_seq_len, heads = heads, dim_head = dim_head, window_size = window_size, local_attn_window_size = local_attn_window_size, local_attn_radius_blocks = local_attn_radius_blocks, causal = causal, weight_tie = weight_tie, ff_dropout = ff_dropout, attn_dropout = attn_dropout, attn_layer_dropout = attn_layer_dropout, layer_dropout = layer_dropout, n_local_attn_heads = n_local_attn_heads, ff_glu = ff_glu, reversible = reversible, ff_chunks = ff_chunks, kmeans_ema_decay = kmeans_ema_decay, receives_context = receives_context, context_window_size = context_window_size, rel_pos_emb = rel_pos_emb, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys, moe_layers = moe_layers, moe_num_experts = moe_num_experts, moe_loss_coef = moe_loss_coef, num_mem_kv = num_mem_kv, shared_qk = shared_qk, context_shared_qk = context_shared_qk, _register_kmeans_update = _register_kmeans_update, use_rezero = use_rezero, use_scale_norm = use_scale_norm, ff_activation = ff_activation, shift_tokens = shift_tokens) if emb_dim != dim: self.routing_transformer = ProjectInOut(self.routing_transformer, emb_dim, dim, project_out = not return_embeddings) self.norm = nn.LayerNorm(emb_dim) if return_embeddings: self.out = nn.Identity() elif tie_embedding: self.out = MatrixMultiply(self.token_emb.weight, transpose = True) else: self.out = nn.Linear(emb_dim, num_tokens) def cancel_kmeans_update(self): transformer = find_modules(self, RoutingTransformer)[0] transformer.cancel_kmeans_update() def update_kmeans(self): for m in find_modules(self, Kmeans): m.update() def forward(self, x, **kwargs): x = self.token_emb(x) rotary_pos_emb = self.sinu_pos_emb(x) x, loss = self.routing_transformer(x, pos_emb = rotary_pos_emb, **kwargs) x = self.norm(x) return self.out(x), loss
routing-transformer-master
routing_transformer/routing_transformer.py
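A small sketch of the routed attention module in isolation, with made-up shapes: KmeansAttention expects pre-split heads, i.e. queries, keys and values of shape (batch, heads, seq, dim_head), and returns the attention output together with the k-means commitment loss.

# sketch only: KmeansAttention on random tensors
import torch
from routing_transformer import KmeansAttention

attn = KmeansAttention(num_clusters = 4, window_size = 64, num_heads = 8, head_dim = 64, causal = True)

q = torch.randn(2, 8, 256, 64)
k = torch.randn(2, 8, 256, 64)
v = torch.randn(2, 8, 256, 64)

out, commitment_loss = attn(q, k, v)   # out: (2, 8, 256, 64); loss is a scalar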
import os
import re
from subprocess import check_call
from setuptools import setup, find_packages
from setuptools.command.install import install

__pkg_name__ = 'bonito'

verstrline = open(os.path.join(__pkg_name__, '__init__.py'), 'r').read()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
    __version__ = mo.group(1)
else:
    raise RuntimeError('Unable to find version string in "{}/__init__.py".'.format(__pkg_name__))

USE_CUDA111 = False

if USE_CUDA111:
    print("Building with CUDA 11.1")
    require_file = 'requirements-cuda111.txt'
    package_name = "ont-%s-cuda111" % __pkg_name__
else:
    print("Building with CUDA 10.2")
    require_file = 'requirements.txt'
    package_name = "ont-%s" % __pkg_name__

with open(require_file) as f:
    requirements = f.read().splitlines()

with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

class download_latest_model(install):
    def run(self):
        install.run(self)
        check_call("bonito download --models --latest -f".split())

setup(
    name=package_name,
    version=__version__,
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Oxford Nanopore Technologies, Ltd',
    author_email='[email protected]',
    url='https://github.com/nanoporetech/bonito',
    cmdclass={
        'install': download_latest_model,
    },
    entry_points = {
        'console_scripts': [
            '{0} = {0}:main'.format(__pkg_name__)
        ]
    },
    dependency_links=[
        'https://download.pytorch.org/whl/torch_stable.html',
    ]
)
bonito-master
setup.py
""" Bonito Aligner """ from threading import Thread from functools import partial from mappy import Aligner, ThreadBuffer from bonito.multiprocessing import ThreadMap, ProcessMap def align_map(aligner, sequences, n_thread=4): """ Align `sequences` with minimap using `n_thread` threads. """ return ThreadMap(partial(MappyWorker, aligner), sequences, n_thread) class MappyWorker(Thread): """ Process that reads items from an input_queue, applies a func to them and puts them on an output_queue """ def __init__(self, aligner, input_queue=None, output_queue=None): super().__init__() self.aligner = aligner self.input_queue = input_queue self.output_queue = output_queue def run(self): thrbuf = ThreadBuffer() while True: item = self.input_queue.get() if item is StopIteration: self.output_queue.put(item) break k, v = item mapping = next(self.aligner.map(v['sequence'], buf=thrbuf, MD=True), None) self.output_queue.put((k, {**v, 'mapping': mapping}))
bonito-master
bonito/aligner.py
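A sketch of how align_map might be driven, under a couple of assumptions: the reference path is a placeholder, and ThreadMap (defined in bonito.multiprocessing, not shown here) is assumed to be iterable and to yield the (key, dict) pairs that MappyWorker places on its output queue. The preset name is the standard minimap2 'map-ont' preset.

# sketch only: align basecalled sequences with minimap2 via mappy
from mappy import Aligner
from bonito.aligner import align_map

aligner = Aligner('reference.fasta', preset='map-ont')   # placeholder reference path
if not aligner:
    raise RuntimeError('failed to load/build minimap2 index')

# each item is a (key, dict) pair with a 'sequence' entry, as MappyWorker expects
basecalls = iter([('read_1', {'sequence': 'ACGTACGTACGT'})])

for read_id, result in align_map(aligner, basecalls, n_thread=2):
    print(read_id, result['mapping'])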
""" Bonito Fast5 Utils """ import sys from glob import glob from pathlib import Path from functools import partial from multiprocessing import Pool from itertools import chain, starmap import torch import numpy as np from scipy.signal import find_peaks from ont_fast5_api.fast5_interface import get_fast5_file class Read: def __init__(self, read, filename): self.read_id = read.read_id self.filename = filename.name self.run_id = read.get_run_id() if type(self.run_id) in (bytes, np.bytes_): self.run_id = self.run_id.decode() read_attrs = read.handle[read.raw_dataset_group_name].attrs channel_info = read.handle[read.global_key + 'channel_id'].attrs self.offset = int(channel_info['offset']) self.sampling_rate = channel_info['sampling_rate'] self.scaling = channel_info['range'] / channel_info['digitisation'] self.mux = read_attrs['start_mux'] self.channel = channel_info['channel_number'] if type(self.channel) in (bytes, np.bytes_): self.channel = self.channel.decode() self.start = read_attrs['start_time'] / self.sampling_rate self.duration = read_attrs['duration'] / self.sampling_rate raw = read.handle[read.raw_dataset_name][:] scaled = np.array(self.scaling * (raw + self.offset), dtype=np.float32) trim_start, _ = trim(scaled[:8000]) scaled = scaled[trim_start:] self.template_start = self.start + (1 / self.sampling_rate) * trim_start self.template_duration = self.duration - (1 / self.sampling_rate) * trim_start if len(scaled) > 8000: med, mad = med_mad(scaled) self.signal = (scaled - med) / mad else: self.signal = norm_by_noisiest_section(scaled) def __repr__(self): return "Read('%s')" % self.read_id class ReadChunk: def __init__(self, read, chunk, i, n): self.read_id = "%s:%i:%i" % (read.read_id, i, n) self.run_id = read.run_id self.filename = read.filename self.mux = read.mux self.channel = read.channel self.start = read.start self.duration = read.duration self.template_start = self.start self.template_duration = self.duration self.signal = chunk def __repr__(self): return "ReadChunk('%s')" % self.read_id def trim(signal, window_size=40, threshold_factor=2.4, min_elements=3): min_trim = 10 signal = signal[min_trim:] med, mad = med_mad(signal[-(window_size*100):]) threshold = med + mad * threshold_factor num_windows = len(signal) // window_size seen_peak = False for pos in range(num_windows): start = pos * window_size end = start + window_size window = signal[start:end] if len(window[window > threshold]) > min_elements or seen_peak: seen_peak = True if window[-1] > threshold: continue return min(end + min_trim, len(signal)), len(signal) return min_trim, len(signal) def med_mad(x, factor=1.4826): """ Calculate signal median and median absolute deviation """ med = np.median(x) mad = np.median(np.absolute(x - med)) * factor return med, mad def norm_by_noisiest_section(signal, samples=100, threshold=6.0): """ Normalise using the medmad from the longest continuous region where the noise is above some threshold relative to the std of the full signal. 
""" threshold = signal.std() / threshold noise = np.ones(signal.shape) for idx in np.arange(signal.shape[0] // samples): window = slice(idx * samples, (idx + 1) * samples) noise[window] = np.where(signal[window].std() > threshold, 1, 0) # start and end low for peak finding noise[0] = 0; noise[-1] = 0 peaks, info = find_peaks(noise, width=(None, None)) if len(peaks): widest = np.argmax(info['widths']) med, mad = med_mad(signal[info['left_bases'][widest]: info['right_bases'][widest]]) else: med, mad = med_mad(signal) return (signal - med) / mad def read_chunks(read, chunksize=4000, overlap=400): """ Split a Read in fixed sized ReadChunks """ if len(read.signal) < chunksize: return _, offset = divmod(len(read.signal) - chunksize, chunksize - overlap) signal = torch.from_numpy(read.signal[offset:]) blocks = signal.unfold(0, chunksize, chunksize - overlap) for i, block in enumerate(blocks): yield ReadChunk(read, block.numpy(), i+1, blocks.shape[0]) def get_raw_data(filename, read_ids=None, skip=False): """ Get the raw signal and read id from the fast5 files """ with get_fast5_file(filename, 'r') as f5_fh: for read_id in f5_fh.get_read_ids(): if read_ids is None or (read_id in read_ids) ^ skip: yield Read(f5_fh.get_read(read_id), filename) def get_read_ids(filename, read_ids=None, skip=False): """ Get all the read_ids from the file `filename`. """ with get_fast5_file(filename, 'r') as f5_fh: ids = [(filename, rid) for rid in f5_fh.get_read_ids()] if read_ids is None: return ids return [rid for rid in ids if (rid[1] in read_ids) ^ skip] def get_raw_data_for_read(info): """ Get the raw signal from the fast5 file for a given filename, read_id pair """ filename, read_id = info with get_fast5_file(filename, 'r') as f5_fh: return Read(f5_fh.get_read(read_id), filename) def get_reads(directory, read_ids=None, skip=False, max_read_size=0, n_proc=1, recursive=False, cancel=None): """ Get all reads in a given `directory`. """ pattern = "**/*.fast5" if recursive else "*.fast5" get_filtered_reads = partial(get_read_ids, read_ids=read_ids, skip=skip) with Pool(n_proc) as pool: for job in chain(pool.imap(get_filtered_reads, (Path(x) for x in glob(directory + "/" + pattern, recursive=True)))): for read in pool.imap(get_raw_data_for_read, job): if max_read_size > 0 and len(read.signal) > max_read_size: sys.stderr.write( "> skipping long read %s (%s samples)\n" % (read.read_id, len(read.signal)) ) continue yield read if cancel is not None and cancel.is_set(): return
bonito-master
bonito/fast5.py
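A short sketch of reading normalised signal from a directory of fast5 files with the helpers above and splitting it into fixed-size chunks; '/data/fast5' is a placeholder path.

# sketch only: iterate reads and their chunks
from bonito.fast5 import get_reads, read_chunks

for read in get_reads('/data/fast5', recursive=True, n_proc=4):
    print(read.read_id, len(read.signal))
    for chunk in read_chunks(read, chunksize=4000, overlap=400):
        print(' ', chunk.read_id, chunk.signal.shape)
    break  # just the first read for the sketch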
""" Bonito utils """ import os import re import sys import random from glob import glob from itertools import groupby from operator import itemgetter from importlib import import_module from collections import deque, defaultdict, OrderedDict import toml import torch import parasail import numpy as np from torch.cuda import get_device_capability try: from claragenomics.bindings import cuda from claragenomics.bindings.cudapoa import CudaPoaBatch except ImportError: pass __dir__ = os.path.dirname(os.path.realpath(__file__)) __data__ = os.path.join(__dir__, "data") __models__ = os.path.join(__dir__, "models") __configs__ = os.path.join(__dir__, "models/configs") split_cigar = re.compile(r"(?P<len>\d+)(?P<op>\D+)") default_data = os.path.join(__data__, "dna_r9.4.1") default_config = os.path.join(__configs__, "[email protected]") def init(seed, device): """ Initialise random libs and setup cudnn https://pytorch.org/docs/stable/notes/randomness.html """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if device == "cpu": return torch.backends.cudnn.enabled = True torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False assert(torch.cuda.is_available()) def permute(x, input_layout, output_layout): """ Permute `x` from `input_layout` to `output_layout` >>> permute(x, 'TNC', 'NTC') """ if input_layout == output_layout: return x return x.permute(*[input_layout.index(x) for x in output_layout]) def concat(xs, dim=0): """ Type agnostic concat. """ if isinstance(xs[0], torch.Tensor): return torch.cat(xs, dim=dim) elif isinstance(xs[0], np.ndarray): return np.concatenate(xs, axis=dim) elif isinstance(xs[0], list): return [x for l in xs for x in l] elif isinstance(xs[0], str): return ''.join(xs) elif isinstance(xs[0], dict): return {k: concat([x[k] for x in xs], dim) for k in xs[0].keys()} else: raise TypeError def select_range(x, start, end, dim=0): """ Type agnostic range select. """ if isinstance(x, dict): return {k: select_range(v, start, end, dim) for (k, v) in x.items()} if dim == 0 or isinstance(x, list): return x[start:end] return x[(*(slice(None),)*dim, slice(start, end))] def size(x, dim=0): """ Type agnostic size. """ if hasattr(x, 'shape'): return x.shape[dim] elif dim == 0: return len(x) raise TypeError def half_supported(): """ Returns whether FP16 is support on the GPU """ try: return get_device_capability()[0] >= 7 except: return False def phred(prob, scale=1.0, bias=0.0): """ Converts `prob` into a ascii encoded phred quality score between 0 and 40. """ p = max(1 - prob, 1e-4) q = -10 * np.log10(p) * scale + bias return chr(int(np.round(q) + 33)) def mean_qscore_from_qstring(qstring): """ Convert qstring into a mean qscore """ if len(qstring) == 0: return 0.0 err_probs = [10**((ord(c) - 33) / -10) for c in qstring] mean_err = np.mean(err_probs) return -10 * np.log10(max(mean_err, 1e-4)) def decode_ref(encoded, labels): """ Convert a integer encoded reference into a string and remove blanks """ return ''.join(labels[e] for e in encoded if e) def column_to_set(filename, idx=0, skip_header=False): """ Pull a column from a file and return a set of the values. 
""" if filename and os.path.isfile(filename): with open(filename, 'r') as tsv: if skip_header: next(tsv) return {line.strip().split()[idx] for line in tsv.readlines()} def chunk(signal, chunksize, overlap): """ Convert a read into overlapping chunks before calling """ T = signal.shape[0] if chunksize == 0: chunks = signal[None, :] elif T < chunksize: chunks = torch.nn.functional.pad(signal, (chunksize - T, 0))[None, :] else: stub = (T - overlap) % (chunksize - overlap) chunks = signal[stub:].unfold(0, chunksize, chunksize - overlap) if stub > 0: chunks = torch.cat([signal[None, :chunksize], chunks], dim=0) return chunks.unsqueeze(1) def stitch(chunks, chunksize, overlap, length, stride, reverse=False): """ Stitch chunks together with a given overlap """ if chunks.shape[0] == 1: return chunks.squeeze(0) semi_overlap = overlap // 2 start, end = semi_overlap // stride, (chunksize - semi_overlap) // stride stub = (length - overlap) % (chunksize - overlap) first_chunk_end = (stub + semi_overlap) // stride if (stub > 0) else end if reverse: chunks = list(chunks) return concat([ chunks[-1][:-start], *(x[-end:-start] for x in reversed(chunks[1:-1])), chunks[0][-first_chunk_end:] ]) else: return concat([ chunks[0, :first_chunk_end], *chunks[1:-1, start:end], chunks[-1, start:] ]) def batchify(items, batchsize, dim=0): """ Batch up items up to `batch_size`. """ stack, pos = [], 0 for k, v in items: breaks = range(batchsize - pos, size(v, dim), batchsize) for start, end in zip([0, *breaks], [*breaks, size(v, dim)]): sub_batch = select_range(v, start, end, dim) stack.append(((k, (pos, pos + end - start)), sub_batch)) if pos + end - start == batchsize: ks, vs = zip(*stack) yield ks, concat(vs, dim) stack, pos = [], 0 else: pos += end - start if len(stack): ks, vs = zip(*stack) yield ks, concat(vs, dim) def unbatchify(batches, dim=0): """ Reconstruct batches. """ batches = ( (k, select_range(v, start, end, dim)) for sub_batches, v in batches for k, (start, end) in sub_batches ) return ( (k, concat([v for (k, v) in group], dim)) for k, group in groupby(batches, itemgetter(0)) ) def load_data(limit=None, directory=None): """ Load the training data """ if directory is None: directory = default_data chunks = np.load(os.path.join(directory, "chunks.npy"), mmap_mode='r') targets = np.load(os.path.join(directory, "references.npy"), mmap_mode='r') lengths = np.load(os.path.join(directory, "reference_lengths.npy"), mmap_mode='r') indices = os.path.join(directory, "indices.npy") if os.path.exists(indices): idx = np.load(indices, mmap_mode='r') idx = idx[idx < lengths.shape[0]] if limit: idx = idx[:limit] return chunks[idx, :], targets[idx, :], lengths[idx] if limit: chunks = chunks[:limit] targets = targets[:limit] lengths = lengths[:limit] return np.array(chunks), np.array(targets), np.array(lengths) def load_symbol(config, symbol): """ Dynamic load a symbol from module specified in model config. 
""" if not isinstance(config, dict): if not os.path.isdir(config) and os.path.isdir(os.path.join(__models__, config)): dirname = os.path.join(__models__, config) else: dirname = config config = toml.load(os.path.join(dirname, 'config.toml')) imported = import_module(config['model']['package']) return getattr(imported, symbol) def match_names(state_dict, model): keys_and_shapes = lambda state_dict: zip(*[ (k, s) for s, i, k in sorted([(v.shape, i, k) for i, (k, v) in enumerate(state_dict.items())]) ]) k1, s1 = keys_and_shapes(state_dict) k2, s2 = keys_and_shapes(model.state_dict()) assert s1 == s2 remap = dict(zip(k1, k2)) return OrderedDict([(k, remap[k]) for k in state_dict.keys()]) def load_model(dirname, device, weights=None, half=None, chunksize=0): """ Load a model from disk """ if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)): dirname = os.path.join(__models__, dirname) if not weights: # take the latest checkpoint weight_files = glob(os.path.join(dirname, "weights_*.tar")) if not weight_files: raise FileNotFoundError("no model weights found in '%s'" % dirname) weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files]) device = torch.device(device) config = toml.load(os.path.join(dirname, 'config.toml')) weights = os.path.join(dirname, 'weights_%s.tar' % weights) Model = load_symbol(config, "Model") model = Model(config) state_dict = torch.load(weights, map_location=device) state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()} new_state_dict = OrderedDict() for k, v in state_dict.items(): name = k.replace('module.', '') new_state_dict[name] = v model.load_state_dict(new_state_dict) if half is None: half = half_supported() if half: model = model.half() model.eval() model.to(device) return model def parasail_to_sam(result, seq): """ Extract reference start and sam compatible cigar string. :param result: parasail alignment result. :param seq: query sequence. :returns: reference start coordinate, cigar string. 
""" cigstr = result.cigar.decode.decode() first = re.search(split_cigar, cigstr) first_count, first_op = first.groups() prefix = first.group() rstart = result.cigar.beg_ref cliplen = result.cigar.beg_query clip = '' if cliplen == 0 else '{}S'.format(cliplen) if first_op == 'I': pre = '{}S'.format(int(first_count) + cliplen) elif first_op == 'D': pre = clip rstart = int(first_count) else: pre = '{}{}'.format(clip, prefix) mid = cigstr[len(prefix):] end_clip = len(seq) - result.end_query - 1 suf = '{}S'.format(end_clip) if end_clip > 0 else '' new_cigstr = ''.join((pre, mid, suf)) return rstart, new_cigstr def accuracy(ref, seq, balanced=False, min_coverage=0.0): """ Calculate the accuracy between `ref` and `seq` """ alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull) counts = defaultdict(int) q_coverage = len(alignment.traceback.query) / len(seq) r_coverage = len(alignment.traceback.ref) / len(ref) if r_coverage < min_coverage: return 0.0 _, cigar = parasail_to_sam(alignment, seq) for count, op in re.findall(split_cigar, cigar): counts[op] += int(count) if balanced: accuracy = (counts['='] - counts['I']) / (counts['='] + counts['X'] + counts['D']) else: accuracy = counts['='] / (counts['='] + counts['I'] + counts['X'] + counts['D']) return accuracy * 100 def print_alignment(ref, seq): """ Print the alignment between `ref` and `seq` """ alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull) print(alignment.traceback.ref) print(alignment.traceback.comp) print(alignment.traceback.query) print(" Score=%s" % alignment.score) return alignment.score def poa(groups, max_poa_sequences=100, gpu_mem_per_batch=0.9): """ Generate consensus for POA groups. Args: groups : A list of lists of sequences for which consensus is to be generated. """ free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device()) gpu_mem_per_batch *= free batch = CudaPoaBatch(max_poa_sequences, gpu_mem_per_batch, stream=None, output_type="consensus") results = [] for i, group in enumerate(groups, start=1): group_status, seq_status = batch.add_poa_group(group) # Once batch is full, run POA processing if group_status == 1 or i == len(groups): batch.generate_poa() consensus, coverage, status = batch.get_consensus() results.extend(consensus) batch.reset() group_status, seq_status = batch.add_poa_group(group) return results
bonito-master
bonito/util.py
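To make the helpers in bonito/util.py concrete, here is a minimal usage sketch. It is not taken from the repository; the signal length, chunk sizes and read keys are invented for illustration. It shows chunk() windowing a raw signal and batchify()/unbatchify() round-tripping keyed tensors.

import torch
from bonito.util import chunk, batchify, unbatchify

# window a synthetic 1-D signal into overlapping chunks of shape (N, 1, chunksize)
signal = torch.randn(10_000)            # hypothetical raw read signal
chunks = chunk(signal, chunksize=4000, overlap=500)
print(chunks.shape)                     # torch.Size([3, 1, 4000])

# pack keyed items into fixed-size batches and reassemble them per key
items = [('read-1', torch.randn(7, 8)), ('read-2', torch.randn(3, 8))]
restored = dict(unbatchify(batchify(items, batchsize=4)))
print(restored['read-1'].shape)         # torch.Size([7, 8])

stitch() plays the inverse role for model outputs: given the same chunksize, overlap and the model stride, it trims the overlapping regions of each chunk and concatenates the pieces back into a single per-read result.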
""" Bonito nn modules. """ import torch from torch import nn from torch.nn import Module from torch.nn.init import orthogonal_ layers = {} def register(layer): layer.name = layer.__name__.lower() layers[layer.name] = layer return layer register(torch.nn.ReLU) register(torch.nn.Tanh) @register class Swish(torch.nn.SiLU): pass @register class Serial(torch.nn.Sequential): def __init__(self, sublayers): super().__init__(*sublayers) def to_dict(self, include_weights=False): return { 'sublayers': [to_dict(layer, include_weights) for layer in self._modules.values()] } @register class Reverse(Module): def __init__(self, sublayers): super().__init__() self.layer = Serial(sublayers) if isinstance(sublayers, list) else sublayers def forward(self, x): return self.layer(x.flip(0)).flip(0) def to_dict(self, include_weights=False): if isinstance(self.layer, Serial): return self.layer.to_dict(include_weights) else: return {'sublayers': to_dict(self.layer, include_weights)} @register class Convolution(Module): def __init__(self, insize, size, winlen, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = torch.nn.Conv1d(insize, size, winlen, stride=stride, padding=padding, bias=bias) self.activation = layers.get(activation, lambda: activation)() def forward(self, x): if self.activation is not None: return self.activation(self.conv(x)) return self.conv(x) def to_dict(self, include_weights=False): res = { "insize": self.conv.in_channels, "size": self.conv.out_channels, "bias": self.conv.bias is not None, "winlen": self.conv.kernel_size[0], "stride": self.conv.stride[0], "padding": self.conv.padding[0], "activation": self.activation.name if self.activation else None, } if include_weights: res['params'] = { 'W': self.conv.weight, 'b': self.conv.bias if self.conv.bias is not None else [] } return res @register class LinearCRFEncoder(Module): def __init__(self, insize, n_base, state_len, bias=True, scale=None, activation=None, blank_score=None): super().__init__() self.n_base = n_base self.state_len = state_len self.blank_score = blank_score size = (n_base + 1) * n_base**state_len if blank_score is None else n_base**(state_len + 1) self.linear = torch.nn.Linear(insize, size, bias=bias) self.activation = layers.get(activation, lambda: activation)() self.scale = scale def forward(self, x): scores = self.linear(x) if self.activation is not None: scores = self.activation(scores) if self.scale is not None: scores = scores * self.scale if self.blank_score is not None: T, N, C = scores.shape s = torch.tensor(self.blank_score, device=scores.device, dtype=scores.dtype) scores = torch.cat([s.expand(T, N, C//self.n_base, 1), scores.reshape(T, N, C//self.n_base, self.n_base)], axis=-1).reshape(T, N, -1) return scores def to_dict(self, include_weights=False): res = { 'insize': self.linear.in_features, 'n_base': self.n_base, 'state_len': self.state_len, 'bias': self.linear.bias is not None, 'scale': self.scale, 'activation': self.activation.name if self.activation else None, 'blank_score': self.blank_score, } if include_weights: res['params'] = { 'W': self.linear.weight, 'b': self.linear.bias if self.linear.bias is not None else [] } return res @register class SHA(Module): def __init__(self, dim): super().__init__() self.scale = dim ** -0.5 self.to_q = nn.Sequential(nn.Linear(dim, dim), nn.LayerNorm(dim)) def forward(self, x, kv): x = x.transpose(0, 1) kv = kv.transpose(0, 1) q = self.to_q(x) sim = torch.matmul(q, kv.transpose(-1, -2)) * self.scale attn = sim.softmax(dim=-1) out = torch.matmul(attn, 
kv) return out.transpose(0, 1) @register class SHABlock(Module): """ https://arxiv.org/abs/1911.11423 """ def __init__(self, dim, ff_mult=4): super().__init__() self.attn_query_norm = nn.LayerNorm(dim) self.attn_kv_norm = nn.LayerNorm(dim) self.attn = SHA(dim=dim) self.ff_residual_norm = nn.LayerNorm(dim) self.ff = Serial([ nn.LayerNorm(dim), nn.Linear(dim, dim * ff_mult), nn.GELU(), nn.Linear(dim * ff_mult, dim), ]) def forward(self, x): kv = self.attn_kv_norm(x) x = self.attn_query_norm(x) x = self.attn(x, kv) + x x = self.ff(x) + self.ff_residual_norm(x) return x @register class Permute(Module): def __init__(self, dims): super().__init__() self.dims = dims def forward(self, x): return x.permute(*self.dims) def to_dict(self, include_weights=False): return {'dims': self.dims} def truncated_normal(size, dtype=torch.float32, device=None, num_resample=5): x = torch.empty(size + (num_resample,), dtype=torch.float32, device=device).normal_() i = ((x < 2) & (x > -2)).max(-1, keepdim=True)[1] return torch.clamp_(x.gather(-1, i).squeeze(-1), -2, 2) class RNNWrapper(Module): def __init__( self, rnn_type, *args, reverse=False, orthogonal_weight_init=True, disable_state_bias=True, bidirectional=False, **kwargs ): super().__init__() if reverse and bidirectional: raise Exception("'reverse' and 'bidirectional' should not both be set to True") self.reverse = reverse self.rnn = rnn_type(*args, bidirectional=bidirectional, **kwargs) self.init_orthogonal(orthogonal_weight_init) self.init_biases() if disable_state_bias: self.disable_state_bias() def forward(self, x): if self.reverse: x = x.flip(0) y, h = self.rnn(x) if self.reverse: y = y.flip(0) return y def init_biases(self, types=('bias_ih',)): for name, param in self.rnn.named_parameters(): if any(k in name for k in types): with torch.no_grad(): param.set_(0.5*truncated_normal(param.shape, dtype=param.dtype, device=param.device)) def init_orthogonal(self, types=True): if not types: return if types == True: types = ('weight_ih', 'weight_hh') for name, x in self.rnn.named_parameters(): if any(k in name for k in types): for i in range(0, x.size(0), self.rnn.hidden_size): orthogonal_(x[i:i+self.rnn.hidden_size]) def disable_state_bias(self): for name, x in self.rnn.named_parameters(): if 'bias_hh' in name: x.requires_grad = False x.zero_() @register class LSTM(RNNWrapper): def __init__(self, size, insize, bias=True, reverse=False): super().__init__(torch.nn.LSTM, size, insize, bias=bias, reverse=reverse) def to_dict(self, include_weights=False): res = { 'size': self.rnn.hidden_size, 'insize': self.rnn.input_size, 'bias': self.rnn.bias, 'reverse': self.reverse, } if include_weights: res['params'] = { 'iW': self.rnn.weight_ih_l0.reshape(4, self.rnn.hidden_size, self.rnn.input_size), 'sW': self.rnn.weight_hh_l0.reshape(4, self.rnn.hidden_size, self.rnn.hidden_size), 'b': self.rnn.bias_ih_l0.reshape(4, self.rnn.hidden_size) } return res def to_dict(layer, include_weights=False): if hasattr(layer, 'to_dict'): return {'type': layer.name, **layer.to_dict(include_weights)} return {'type': layer.name} def from_dict(model_dict, layer_types=None): model_dict = model_dict.copy() if layer_types is None: layer_types = layers type_name = model_dict.pop('type') typ = layer_types[type_name] if 'sublayers' in model_dict: sublayers = model_dict['sublayers'] model_dict['sublayers'] = [ from_dict(x, layer_types) for x in sublayers ] if isinstance(sublayers, list) else from_dict(sublayers, layer_types) try: layer = typ(**model_dict) except Exception as e: raise 
Exception(f'Failed to build layer of type {typ} with args {model_dict}') from e return layer
bonito-master
bonito/nn.py
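The registered layers in bonito/nn.py are meant to be serialised with to_dict() and rebuilt with from_dict(). The following is a small illustrative round-trip with made-up layer sizes, not code from the repository.

import torch
from bonito.nn import Convolution, to_dict, from_dict

conv = Convolution(insize=1, size=16, winlen=5, stride=1, padding=2, activation='swish')
spec = to_dict(conv)          # {'type': 'convolution', 'insize': 1, 'size': 16, ...}
rebuilt = from_dict(spec)     # same architecture, freshly initialised weights

x = torch.randn(8, 1, 100)    # (batch, channels, time) as expected by Conv1d
print(rebuilt(x).shape)       # torch.Size([8, 16, 100])

Serial and Reverse serialise recursively through their 'sublayers' entry, which is how a whole encoder stack ends up as one nested dictionary.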
""" Bonito Input/Output """ import os import sys import csv import pandas as pd from warnings import warn from threading import Thread from logging import getLogger from contextlib import contextmanager from os.path import realpath, splitext, dirname import numpy as np from mappy import revcomp import bonito from bonito.cli.convert import typical_indices logger = getLogger('bonito') class CSVLogger: def __init__(self, filename, sep=','): self.filename = str(filename) if os.path.exists(self.filename): with open(self.filename) as f: self.columns = csv.DictReader(f).fieldnames else: self.columns = None self.fh = open(self.filename, 'a', newline='') self.csvwriter = csv.writer(self.fh, delimiter=sep) self.count = 0 def set_columns(self, columns): if self.columns: raise Exception('Columns already set') self.columns = list(columns) self.csvwriter.writerow(self.columns) def append(self, row): if self.columns is None: self.set_columns(row.keys()) self.csvwriter.writerow([row.get(k, '-') for k in self.columns]) self.count += 1 if self.count > 100: self.count = 0 self.fh.flush() def close(self): self.fh.close() def __enter__(self): return self def __exit__(self, *args): self.close() @contextmanager def devnull(*args, **kwds): """ A context manager that sends all out stdout & stderr to devnull. """ save_fds = [os.dup(1), os.dup(2)] null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)] os.dup2(null_fds[0], 1) os.dup2(null_fds[1], 2) try: yield finally: os.dup2(save_fds[0], 1) os.dup2(save_fds[1], 2) for fd in null_fds + save_fds: os.close(fd) def write_fasta(header, sequence, fd=sys.stdout): """ Write a fasta record to a file descriptor. """ fd.write(">%s\n" % header) fd.write("%s\n" % sequence) fd.flush() def write_fastq(header, sequence, qstring, fd=sys.stdout): """ Write a fastq record to a file descriptor. """ fd.write("@%s\n" % header) fd.write("%s\n" % sequence) fd.write("+\n") fd.write("%s\n" % qstring) fd.flush() def write_sam_header(aligner, fd=sys.stdout, sep='\t'): """ Write the SQ & PG sam headers to a file descriptor. """ fd.write('%s\n' % os.linesep.join([ sep.join([ '@SQ', 'SN:%s' % name, 'LN:%s' % len(aligner.seq(name)) ]) for name in aligner.seq_names ])) fd.write('%s\n' % sep.join([ '@PG', 'ID:bonito', 'PN:bonito', 'VN:%s' % bonito.__version__, 'CL:%s' % ' '.join(sys.argv), ])) fd.flush() def write_sam(read_id, sequence, qstring, mapping, fd=sys.stdout, unaligned=False, sep='\t'): """ Write a sam record to a file descriptor. """ if unaligned: fd.write("%s\n" % sep.join(map(str, [ read_id, 4, '*', 0, 0, '*', '*', 0, 0, sequence, qstring, 'NM:i:0' ]))) else: softclip = [ '%sS' % mapping.q_st if mapping.q_st else '', mapping.cigar_str, '%sS' % (len(sequence) - mapping.q_en) if len(sequence) - mapping.q_en else '' ] fd.write("%s\n" % sep.join(map(str, [ read_id, 0 if mapping.strand == +1 else 16, mapping.ctg, mapping.r_st + 1, mapping.mapq, ''.join(softclip if mapping.strand == +1 else softclip[::-1]), '*', 0, 0, sequence if mapping.strand == +1 else revcomp(sequence), qstring, 'NM:i:%s' % mapping.NM, 'MD:Z:%s' % mapping.MD, ]))) fd.flush() def summary_file(): """ Return the filename to use for the summary tsv. 
""" stdout = realpath('/dev/fd/1') if sys.stdout.isatty() or stdout.startswith('/proc'): return 'summary.tsv' return '%s_summary.tsv' % splitext(stdout)[0] summary_field_names = [ 'filename', 'read_id', 'run_id', 'channel', 'mux', 'start_time', 'duration', 'template_start', 'template_duration', 'sequence_length_template', 'mean_qscore_template', #if alignment 'alignment_genome', 'alignment_genome_start', 'alignment_genome_end', 'alignment_strand_start', 'alignment_strand_end', 'alignment_direction', 'alignment_length', 'alignment_num_aligned', 'alignment_num_correct', 'alignment_num_insertions', 'alignment_num_deletions', 'alignment_num_substitutions', 'alignment_mapq', 'alignment_strand_coverage', 'alignment_identity', 'alignment_accuracy', ] def summary_row(read, seqlen, qscore, alignment=False): """ Summary tsv row. """ fields = [ read.filename, read.read_id, read.run_id, read.channel, read.mux, read.start, read.duration, read.template_start, read.template_duration, seqlen, qscore, ] if alignment: ins = sum(count for count, op in alignment.cigar if op == 1) dels = sum(count for count, op in alignment.cigar if op == 2) subs = alignment.NM - ins - dels length = alignment.blen matches = length - ins - dels correct = alignment.mlen fields.extend([ alignment.ctg, alignment.r_st, alignment.r_en, alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en, alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st, '+' if alignment.strand == +1 else '-', length, matches, correct, ins, dels, subs, alignment.mapq, (alignment.q_en - alignment.q_st) / seqlen, correct / matches, correct / length, ]) elif alignment is None: fields.extend( ['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0] ) return dict(zip(summary_field_names, fields)) duplex_summary_field_names = [ 'filename_template', 'read_id_template', 'filename_complement', 'read_id_complement', 'run_id', 'channel_template', 'mux_template', 'channel_complement', 'mux_complement', 'sequence_length_duplex', 'mean_qscore_duplex', #if alignment 'alignment_genome', 'alignment_genome_start', 'alignment_genome_end', 'alignment_strand_start', 'alignment_strand_end', 'alignment_direction', 'alignment_length', 'alignment_num_aligned', 'alignment_num_correct', 'alignment_num_insertions', 'alignment_num_deletions', 'alignment_num_substitutions', 'alignment_mapq', 'alignment_strand_coverage', 'alignment_identity', 'alignment_accuracy', ] def duplex_summary_row(read_temp, comp_read, seqlen, qscore, alignment=False): """ Duplex summary tsv row. 
""" fields = [ read_temp.filename, read_temp.read_id, comp_read.filename, comp_read.read_id, read_temp.run_id, read_temp.channel, read_temp.mux, comp_read.channel, comp_read.mux, seqlen, qscore, ] if alignment: ins = sum(count for count, op in alignment.cigar if op == 1) dels = sum(count for count, op in alignment.cigar if op == 2) subs = alignment.NM - ins - dels length = alignment.blen matches = length - ins - dels correct = alignment.mlen fields.extend([ alignment.ctg, alignment.r_st, alignment.r_en, alignment.q_st if alignment.strand == +1 else seqlen - alignment.q_en, alignment.q_en if alignment.strand == +1 else seqlen - alignment.q_st, '+' if alignment.strand == +1 else '-', length, matches, correct, ins, dels, subs, alignment.mapq, (alignment.q_en - alignment.q_st) / seqlen, correct / matches, correct / length, ]) elif alignment is None: fields.extend( ['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0] ) return dict(zip(duplex_summary_field_names, fields)) class Writer(Thread): def __init__(self, iterator, aligner, fd=sys.stdout, fastq=False, duplex=False): super().__init__() self.fd = fd self.log = [] self.fastq = fastq self.duplex = duplex self.aligner = aligner self.iterator = iterator self.write_headers() def write_headers(self): if self.aligner: write_sam_header(self.aligner, fd=self.fd) def run(self): with CSVLogger(summary_file(), sep='\t') as summary: for read, res in self.iterator: seq = res['sequence'] qstring = res.get('qstring', '*') mean_qscore = res.get('mean_qscore', 0.0) mapping = res.get('mapping', False) if self.duplex: samples = len(read[0].signal) + len(read[1].signal) read_id = '%s;%s' % (read[0].read_id, read[1].read_id) else: samples = len(read.signal) read_id = read.read_id if len(seq): if self.aligner: write_sam(read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None) else: if self.fastq: write_fastq(read_id, seq, qstring, fd=self.fd) else: write_fasta(read_id, seq, fd=self.fd) if self.duplex: summary.append(duplex_summary_row(read[0], read[1], len(seq), mean_qscore, alignment=mapping)) else: summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping)) self.log.append((read_id, samples)) else: logger.warn("> skipping empty sequence %s", read_id) class CTCWriter(Thread): """ CTC writer process that writes output numpy training data. 
""" def __init__(self, iterator, aligner, min_coverage, min_accuracy, fd=sys.stdout): super().__init__() self.fd = fd self.log = [] self.aligner = aligner self.iterator = iterator self.min_coverage = min_coverage self.min_accuracy = min_accuracy self.write_headers() def write_headers(self): if self.aligner: write_sam_header(self.aligner, fd=self.fd) def run(self): chunks = [] targets = [] lengths = [] with CSVLogger(summary_file(), sep='\t') as summary: for read, ctc_data in self.iterator: seq = ctc_data['sequence'] qstring = ctc_data['qstring'] mean_qscore = ctc_data['mean_qscore'] mapping = ctc_data.get('mapping', False) self.log.append((read.read_id, len(read.signal))) if len(seq) == 0 or mapping is None: continue cov = (mapping.q_en - mapping.q_st) / len(seq) acc = mapping.mlen / mapping.blen refseq = self.aligner.seq(mapping.ctg, mapping.r_st, mapping.r_en) if acc < self.min_accuracy or cov < self.min_coverage or 'N' in refseq: continue write_sam(read.read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None) summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping)) if mapping.strand == -1: refseq = revcomp(refseq) target = [int(x) for x in refseq.translate({65: '1', 67: '2', 71: '3', 84: '4'})] targets.append(target) chunks.append(read.signal) lengths.append(len(target)) if len(chunks) == 0: sys.stderr.write("> no suitable ctc data to write\n") return chunks = np.array(chunks, dtype=np.float16) targets_ = np.zeros((chunks.shape[0], max(lengths)), dtype=np.uint8) for idx, target in enumerate(targets): targets_[idx, :len(target)] = target lengths = np.array(lengths, dtype=np.uint16) indices = np.random.permutation(typical_indices(lengths)) chunks = chunks[indices] targets_ = targets_[indices] lengths = lengths[indices] summary = pd.read_csv(summary_file(), sep='\t') summary.iloc[indices].to_csv(summary_file(), sep='\t', index=False) output_directory = '.' if sys.stdout.isatty() else dirname(realpath('/dev/fd/1')) np.save(os.path.join(output_directory, "chunks.npy"), chunks) np.save(os.path.join(output_directory, "references.npy"), targets_) np.save(os.path.join(output_directory, "reference_lengths.npy"), lengths) sys.stderr.write("> written ctc training data\n") sys.stderr.write(" - chunks.npy with shape (%s)\n" % ','.join(map(str, chunks.shape))) sys.stderr.write(" - references.npy with shape (%s)\n" % ','.join(map(str, targets_.shape))) sys.stderr.write(" - reference_lengths.npy shape (%s)\n" % ','.join(map(str, lengths.shape))) def stop(self): self.join()
bonito-master
bonito/io.py
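CSVLogger above derives its column set from the first row it is appended and flushes periodically. A minimal, hypothetical use outside of training might look like this (filename and fields invented):

from bonito.io import CSVLogger

with CSVLogger('metrics.csv', sep=',') as log:
    for step in range(3):
        log.append({'step': step, 'loss': 1.0 / (step + 1)})
# keys missing from later rows are written as '-' in their column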
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser

from bonito.cli import basecaller, train, evaluate, view, convert, download, export, duplex

modules = [
    'basecaller', 'train', 'evaluate', 'view', 'convert', 'download', 'export', 'duplex',
]

__version__ = '0.4.0'


def main():
    parser = ArgumentParser(
        'bonito',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {}'.format(__version__)
    )
    subparsers = parser.add_subparsers(
        title='subcommands',
        description='valid commands',
        help='additional help',
        dest='command'
    )
    subparsers.required = True

    for module in modules:
        mod = globals()[module]
        p = subparsers.add_parser(module, parents=[mod.argparser()])
        p.set_defaults(func=mod.main)

    args = parser.parse_args()
    args.func(args)
bonito-master
bonito/__init__.py
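main() above assumes each entry in modules is a module exposing argparser() and main(args). A hypothetical extra subcommand would therefore look roughly like this; the module name and argument are invented for illustration and are not part of the repository.

# e.g. bonito/cli/hello.py -- illustrative only
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

def main(args):
    print("hello", args.name)

def argparser():
    # add_help=False because this parser is attached via parents=[...]
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter, add_help=False)
    parser.add_argument("name")
    return parser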
""" Bonito Multiprocesing """ import queue from itertools import count from threading import Thread from functools import partial from collections import deque from signal import signal, SIGINT from multiprocessing import Process, Queue, Event, Lock, cpu_count def process_iter(iterator, maxsize=1): """ Take an iterator and run it on another process. """ return iter(ProcessIterator(iterator, maxsize=maxsize)) def thread_iter(iterator, maxsize=1): """ Take an iterator and run it on another thread. """ return iter(ThreadIterator(iterator, maxsize=maxsize)) def process_cancel(): """ Register an cancel event on sigint """ event = Event() signal(SIGINT, lambda *a: event.set()) return event def process_map(func, iterator, n_proc=4, maxsize=0): """ Take an `iterator` of key, value pairs and apply `func` to all values using `n_proc` processes. """ if n_proc == 0: return ((k, func(v)) for k, v in iterator) return iter(ProcessMap(func, iterator, n_proc, output_queue=Queue(maxsize))) def thread_map(func, iterator, n_thread=4, maxsize=2): """ Take an `iterator` of key, value pairs and apply `func` to all values using `n_thread` threads. """ if n_thread == 0: return ((k, func(v)) for k, v in iterator) return iter(ThreadMap(partial(MapWorkerThread, func), iterator, n_thread, maxsize=maxsize)) class BackgroundIterator: """ Runs an iterator in the background. """ def __init__(self, iterator, maxsize=10): super().__init__() self.iterator = iterator self.queue = self.QueueClass(maxsize) def __iter__(self): self.start() while True: item = self.queue.get() if item is StopIteration: break yield item def run(self): for item in self.iterator: self.queue.put(item) self.queue.put(StopIteration) def stop(self): self.join() class ThreadIterator(BackgroundIterator, Thread): """ Runs an iterator in a separate process. """ QueueClass = queue.Queue class ProcessIterator(BackgroundIterator, Process): """ Runs an iterator in a separate process. 
""" QueueClass = Queue class MapWorker(Process): """ Process that reads items from an input_queue, applies a func to them and puts them on an output_queue """ def __init__(self, func, input_queue, output_queue): super().__init__() self.func = func self.input_queue = input_queue self.output_queue = output_queue def run(self): while True: item = self.input_queue.get() if item is StopIteration: break k, v = item self.output_queue.put((k, self.func(v))) class ProcessMap(Thread): def __init__(self, func, iterator, n_proc, output_queue=None): super().__init__() self.key_map = {} self.iterator = iterator self.work_queue = Queue(n_proc * 2) self.output_queue = output_queue or Queue() self.processes = [MapWorker(func, self.work_queue, self.output_queue) for _ in range(n_proc)] def start(self): for process in self.processes: process.start() super().start() def run(self): for (k, v) in self.iterator: self.work_queue.put((id(k), v)) self.key_map[id(k)] = k for _ in self.processes: self.work_queue.put(StopIteration) for process in self.processes: process.join() self.output_queue.put(StopIteration) def __iter__(self): self.start() while True: item = self.output_queue.get() if item is StopIteration: break k, v = item yield self.key_map.pop(k), v class MapWorkerThread(Thread): """ Process that reads items from an input_queue, applies a func to them and puts them on an output_queue """ def __init__(self, func, input_queue=None, output_queue=None): super().__init__() self.func = func self.input_queue = input_queue self.output_queue = output_queue def run(self): while True: item = self.input_queue.get() if item is StopIteration: self.output_queue.put(item) break k, v = item self.output_queue.put((k, self.func(v))) class ThreadMap(Thread): def __init__(self, worker_type, iterator, n_thread, maxsize=2): super().__init__() self.iterator = iterator self.n_thread = n_thread self.work_queues = [queue.Queue(maxsize) for _ in range(n_thread)] self.output_queues = [queue.Queue(maxsize) for _ in range(n_thread)] self.workers = [worker_type(input_queue=in_q, output_queue=out_q) for (in_q, out_q) in zip(self.work_queues, self.output_queues)] def start(self): for worker in self.workers: worker.start() super().start() def __iter__(self): self.start() for i in count(): item = self.output_queues[i % self.n_thread].get() if item is StopIteration: #do we need to empty output_queues in order to join worker threads? for j in range(i + 1, i + self.n_thread): self.output_queues[j % self.n_thread].get() break yield item def run(self): for i, (k, v) in enumerate(self.iterator): self.work_queues[i % self.n_thread].put((k, v)) for q in self.work_queues: q.put(StopIteration) for worker in self.workers: worker.join()
bonito-master
bonito/multiprocessing.py
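thread_map() and process_map() above operate on streams of (key, value) pairs and return the keys unchanged. A small sketch with invented data:

from bonito.multiprocessing import thread_map

items = [('read-%d' % i, i) for i in range(8)]

# apply a function to every value on two worker threads, keeping keys and order
for key, value in thread_map(lambda v: v * v, items, n_thread=2):
    print(key, value)   # read-0 0, read-1 1, read-2 4, ...

process_map() has the same interface but runs the function in worker processes, so the mapped function must be picklable (a top-level function rather than a lambda).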
""" Bonito train """ import os import re from glob import glob from functools import partial from time import perf_counter from collections import OrderedDict from datetime import datetime from bonito.util import accuracy, decode_ref, permute, concat, match_names import bonito import torch import numpy as np import torch.nn as nn from tqdm import tqdm from torch.optim.lr_scheduler import LambdaLR import torch.cuda.amp as amp class ChunkDataSet: def __init__(self, chunks, targets, lengths): self.chunks = np.expand_dims(chunks, axis=1) self.targets = targets self.lengths = lengths def __getitem__(self, i): return ( self.chunks[i].astype(np.float32), self.targets[i].astype(np.int64), self.lengths[i].astype(np.int64), ) def __len__(self): return len(self.lengths) def const_schedule(y): """ Constant Scheduler """ return lambda t: y def linear_schedule(y0, y1): """ Linear Scheduler """ return lambda t: y0 + (y1 - y0) * t def cosine_decay_schedule(y0, y1): """ Cosine Decay Scheduler """ return lambda t: y1 + 0.5 * (y0 - y1) * (np.cos(t * np.pi) + 1.0) def piecewise_schedule(knots, funcs): """ Piecewise Scheduler """ def f(t): i = np.searchsorted(knots, t) t0 = 0.0 if i == 0 else knots[i - 1] t1 = 1.0 if i == len(knots) else knots[i] return funcs[i]((t - t0) / (t1 - t0)) return f def func_scheduler(optimizer, func, total_steps, warmup_steps=None, warmup_ratio=0.1, start_step=0): """ Learning Rate Scheduler """ if warmup_steps: y0 = func(0.0) func = piecewise_schedule( [warmup_steps / total_steps], [linear_schedule(warmup_ratio * y0, y0), func] ) return LambdaLR(optimizer, (lambda step: func((step + start_step) / total_steps))) def load_state(dirname, device, model): """ Load a model state dict from disk """ model.to(device) weight_no = None weight_files = glob(os.path.join(dirname, "weights_*.tar")) if weight_files: weight_no = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files]) if weight_no: print("[picking up from epoch %s]" % weight_no) state_dict = torch.load( os.path.join(dirname, 'weights_%s.tar' % weight_no), map_location=device ) state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()} new_state_dict = OrderedDict() for k, v in state_dict.items(): name = k.replace('module.', '') new_state_dict[name] = v model.load_state_dict(new_state_dict) epoch = weight_no else: epoch = 0 return epoch class Trainer: def __init__(self, model, device, train_loader, valid_loader, criterion=None, use_amp=True): self.model = model.to(device) self.device = device self.train_loader = train_loader self.valid_loader = valid_loader self.criterion = criterion or (model.seqdist.ctc_loss if hasattr(model, 'seqdist') else model.ctc_label_smoothing_loss) self.use_amp = use_amp self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp) self.optimizer = None def train_one_step(self, batch): data, targets, lengths = batch self.optimizer.zero_grad() with amp.autocast(enabled=self.use_amp): scores = self.model(data.to(self.device)) losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device)) if not isinstance(losses, dict): losses = {'loss': losses} self.scaler.scale(losses['loss']).backward() self.scaler.unscale_(self.optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=2.0).item() self.scaler.step(self.optimizer) self.scaler.update() return losses, grad_norm def train_one_epoch(self, loss_log, lr_scheduler): t0 = perf_counter() chunks = 0 self.model.train() progress_bar = tqdm( total=len(self.train_loader), 
desc='[0/{}]'.format(len(self.train_loader.dataset)), ascii=True, leave=True, ncols=100, bar_format='{l_bar}{bar}| [{elapsed}{postfix}]' ) smoothed_loss = None with progress_bar: for batch in self.train_loader: chunks += batch[0].shape[0] losses, grad_norm = self.train_one_step(batch) losses = {k: v.item() for k,v in losses.items()} if lr_scheduler is not None: lr_scheduler.step() smoothed_loss = losses['loss'] if smoothed_loss is None else (0.01 * losses['loss'] + 0.99 * smoothed_loss) progress_bar.set_postfix(loss='%.4f' % smoothed_loss) progress_bar.set_description("[{}/{}]".format(chunks, len(self.train_loader.dataset))) progress_bar.update() if loss_log is not None: loss_log.append({'chunks': chunks, 'time': perf_counter() - t0, 'grad_norm': grad_norm, **losses}) return smoothed_loss, perf_counter() - t0 def validate_one_step(self, batch): data, targets, lengths = batch scores = self.model(data.to(self.device)) losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device)) losses = {k: v.item() for k, v in losses.items()} if isinstance(losses, dict) else losses.item() if hasattr(self.model, 'decode_batch'): seqs = self.model.decode_batch(scores) else: seqs = [self.model.decode(x) for x in permute(scores, 'TNC', 'NTC')] refs = [decode_ref(target, self.model.alphabet) for target in targets] accs = [ accuracy(ref, seq, min_coverage=0.5) if len(seq) else 0. for ref, seq in zip(refs, seqs) ] return seqs, refs, accs, losses def validate_one_epoch(self): self.model.eval() with torch.no_grad(): seqs, refs, accs, losses = zip(*(self.validate_one_step(batch) for batch in self.valid_loader)) seqs, refs, accs = (sum(x, []) for x in (seqs, refs, accs)) loss = np.mean([(x['ctc_loss'] if isinstance(x, dict) else x) for x in losses]) return loss, np.mean(accs), np.median(accs) def init_optimizer(self, lr, **kwargs): self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, **kwargs) def get_lr_scheduler(self, epochs, last_epoch=0): return func_scheduler( self.optimizer, cosine_decay_schedule(1.0, 0.1), epochs * len(self.train_loader), warmup_steps=500, start_step=last_epoch*len(self.train_loader) ) def fit(self, workdir, epochs=1, lr=2e-3, last_epoch=0): if self.optimizer is None: self.init_optimizer(lr) lr_scheduler = self.get_lr_scheduler(epochs, last_epoch=last_epoch) for epoch in range(1 + last_epoch, epochs + 1 + last_epoch): try: with bonito.io.CSVLogger(os.path.join(workdir, 'losses_{}.csv'.format(epoch))) as loss_log: train_loss, duration = self.train_one_epoch(loss_log, lr_scheduler) model_state = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict() torch.save(model_state, os.path.join(workdir, "weights_%s.tar" % epoch)) val_loss, val_mean, val_median = self.validate_one_epoch() except KeyboardInterrupt: break print("[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%".format( epoch, workdir, val_loss, val_mean, val_median )) with bonito.io.CSVLogger(os.path.join(workdir, 'training.csv')) as training_log: training_log.append({ 'time': datetime.today(), 'duration': int(duration), 'epoch': epoch, 'train_loss': train_loss, 'validation_loss': val_loss, 'validation_mean': val_mean, 'validation_median': val_median })
bonito-master
bonito/training.py
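The scheduler helpers in bonito/training.py compose plain functions of normalised time t in [0, 1] into a torch LambdaLR. A standalone sketch with a dummy parameter; the step counts and learning rate are chosen only for illustration.

import torch
from bonito.training import func_scheduler, cosine_decay_schedule

params = [torch.nn.Parameter(torch.zeros(1))]
optim = torch.optim.AdamW(params, lr=2e-3)

# 10 warmup steps, then cosine decay from 1.0x down to 0.1x of the base learning rate
sched = func_scheduler(optim, cosine_decay_schedule(1.0, 0.1), total_steps=100, warmup_steps=10)

for _ in range(100):
    optim.step()
    sched.step()

print(optim.param_groups[0]['lr'])   # ~2e-4 after the full schedule

Trainer.get_lr_scheduler() composes the same pieces, using 500 warmup steps over epochs * len(train_loader) total steps.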
""" Bonito Download """ import os import re from shutil import rmtree from zipfile import ZipFile from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from bonito.util import __data__, __models__ from bonito.cli.convert import main as convert from bonito.cli.convert import argparser as cargparser import requests from tqdm import tqdm class File: """ Small class for downloading models and training assets. """ __url__ = "https://nanoporetech.box.com/shared/static/" def __init__(self, path, url_frag, force): self.path = path self.force = force self.url = os.path.join(self.__url__, url_frag) def location(self, filename): return os.path.join(self.path, filename) def exists(self, filename): return os.path.exists(self.location(filename)) def download(self): """ Download the remote file """ # create the requests for the file req = requests.get(self.url, stream=True) total = int(req.headers.get('content-length', 0)) fname = re.findall('filename="([^"]+)', req.headers['content-disposition'])[0] # skip download if local file is found if self.exists(fname.strip('.zip')) and not self.force: print("[skipping %s]" % fname) return if self.exists(fname.strip('.zip')) and self.force: rmtree(self.location(fname.strip('.zip'))) # download the file with tqdm(total=total, unit='iB', ascii=True, ncols=100, unit_scale=True, leave=False) as t: with open(self.location(fname), 'wb') as f: for data in req.iter_content(1024): f.write(data) t.update(len(data)) print("[downloaded %s]" % fname) # unzip .zip files if fname.endswith('.zip'): with ZipFile(self.location(fname), 'r') as zfile: zfile.extractall(self.path) os.remove(self.location(fname)) # convert chunkify training files to bonito if fname.endswith('.hdf5'): print("[converting %s]" % fname) args = cargparser().parse_args([ self.location(fname), self.location(fname).strip('.hdf5') ]) convert(args) r9_models = [ "n8c07gc9ro09zt0ivgcoeuz6krnwsnf6.zip", # dna_r9.4.1@v1 "nas0uhf46fd1lh2jndhx2a54a9vvhxp4.zip", # dna_r9.4.1@v2 "1wodp3ur4jhvqvu5leowfg6lrw54jxp2.zip", # dna_r9.4.1@v3 "uetgwsnb8yfqvuyoka8p09mxilgskqc7.zip", # [email protected] "47t2y48zw4waly25lmzx6sagf4bbbqqz.zip", # [email protected] "hrv649cvx8lvomu1u0tsd47e5u2bbabt.zip", # [email protected] "arqi4qwcj9btsd6bbjsnlbai0s6dg8yd.zip", ] r10_models = [ "e70s615lh3i24rkhz006i0e4u4m8y2xa.zip", # dna_r10.3_q20ea "hnr5mwlm8vmdsfpvn5fsxn3mvhbucy5f.zip", # dna_r10.3@v3 "yesf11tisfrncmod5hj2xtx9kbdveuqt.zip", # [email protected] "ci6xdu7d4wczmhorhw1sweyg4gczx97t.zip", # [email protected] "4cunv5z7nwjag7v2bun0g7vk2lf8rqnc.zip", ] training = [ "cmh91cxupa0are1kc3z9aok425m75vrb.hdf5", ] def main(args): """ Download models and training sets """ if args.models or args.all: print("[downloading models]") for model in r9_models[-1 if args.latest else 0:]: File(__models__, model, args.force).download() for model in r10_models[-1 if args.latest else 0:]: File(__models__, model, args.force).download() if args.training or args.all: print("[downloading training data]") for train in training: File(__data__, train, args.force).download() def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) group = parser.add_mutually_exclusive_group() group.add_argument('--all', action='store_true') group.add_argument('--models', action='store_true') group.add_argument('--training', action='store_true') parser.add_argument('-f', '--force', action='store_true') parser.add_argument('--latest', action='store_true') return parser
bonito-master
bonito/cli/download.py
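This subcommand can also be driven through its own argparser. The call below would fetch only the newest r9 and r10 model archives into bonito's models directory; it downloads real files, so treat it purely as an illustration.

from bonito.cli.download import argparser, main

# equivalent to running `bonito download --models --latest`
args = argparser().parse_args(['--models', '--latest'])
main(args)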
#!/usr/bin/env python """ Convert a Taiyaki chunkify training file to set of Bonito CTC .npy files """ import os import h5py import random import numpy as np from argparse import ArgumentParser from collections import OrderedDict from itertools import islice as take from argparse import ArgumentDefaultsHelpFormatter from tqdm import tqdm from bonito.training import ChunkDataSet def align(samples, pointers, reference): """ align to the start of the mapping """ squiggle_duration = len(samples) mapped_off_the_start = len(pointers[pointers < 0]) mapped_off_the_end = len(pointers[pointers >= squiggle_duration]) pointers = pointers[mapped_off_the_start:len(pointers) - mapped_off_the_end] reference = reference[mapped_off_the_start:len(reference) - mapped_off_the_end] return samples[pointers[0]:pointers[-1]], pointers - pointers[0], reference def scale(read, normalise=True): """ scale and normalise a read """ samples = read['Dacs'][:] scaling = read.attrs['range'] / read.attrs['digitisation'] scaled = (scaling * (samples + read.attrs['offset'])).astype(np.float32) if normalise: return (scaled - read.attrs['shift_frompA']) / read.attrs['scale_frompA'] return scaled def pad_lengths(ragged_array, max_len=None): lengths = np.array([len(x) for x in ragged_array], dtype=np.uint16) padded = np.zeros((len(ragged_array), max_len or np.max(lengths)), dtype=ragged_array[0].dtype) for x, y in zip(ragged_array, padded): y[:len(x)] = x return padded, lengths def regular_break_points(n, chunk_len, overlap=0, align='mid'): num_chunks, remainder = divmod(n - overlap, chunk_len - overlap) start = {'left': 0, 'mid': remainder // 2, 'right': remainder}[align] starts = np.arange(start, start + num_chunks*(chunk_len - overlap), (chunk_len - overlap)) return np.vstack([starts, starts + chunk_len]).T def get_chunks(read, break_points): sample = scale(read) pointers = read['Ref_to_signal'][:] target = read['Reference'][:] + 1 # CTC convention return ( (sample[i:j], target[ti:tj]) for (i, j), (ti, tj) in zip(break_points, np.searchsorted(pointers, break_points)) ) def chunk_dataset(reads, chunk_len, num_chunks=None): all_chunks = ( (chunk, target) for read in reads for chunk, target in get_chunks(reads[read], regular_break_points(len(reads[read]['Dacs']), chunk_len)) ) chunks, targets = zip(*tqdm(take(all_chunks, num_chunks), total=num_chunks)) targets, target_lens = pad_lengths(targets) # convert refs from ragged arrray return ChunkDataSet(chunks, targets, target_lens) def validation_split(reads, num_valid=1000): reads = np.random.permutation(sorted(reads.items())) return OrderedDict(reads[:-num_valid]), OrderedDict(reads[-num_valid:]) def typical_indices(x, n=2.5): mu, sd = np.mean(x), np.std(x) idx, = np.where((mu - n*sd < x) & (x < mu + n*sd)) return idx def filter_chunks(ds, idx): filtered = ChunkDataSet(ds.chunks.squeeze(1)[idx], ds.targets[idx], ds.lengths[idx]) filtered.targets = filtered.targets[:, :filtered.lengths.max()] return filtered def save_chunks(chunks, output_directory): os.makedirs(output_directory, exist_ok=True) np.save(os.path.join(output_directory, "chunks.npy"), chunks.chunks.squeeze(1)) np.save(os.path.join(output_directory, "references.npy"), chunks.targets) np.save(os.path.join(output_directory, "reference_lengths.npy"), chunks.lengths) print() print("> data written to %s:" % output_directory) print(" - chunks.npy with shape", chunks.chunks.squeeze(1).shape) print(" - references.npy with shape", chunks.targets.shape) print(" - reference_lengths.npy shape", chunks.lengths.shape) def main(args): 
random.seed(args.seed) np.random.seed(args.seed) reads = h5py.File(args.chunkify_file, 'r')['Reads'] training, validation = validation_split(reads, args.validation_reads) print("> preparing training chunks\n") training_chunks = chunk_dataset(training, args.chunksize) training_indices = typical_indices(training_chunks.lengths) training_chunks = filter_chunks(training_chunks, np.random.permutation(training_indices)) save_chunks(training_chunks, args.output_directory) print("\n> preparing validation chunks\n") validation_chunks = chunk_dataset(validation, args.chunksize) validation_indices = typical_indices(validation_chunks.lengths) validation_chunks = filter_chunks(validation_chunks, validation_indices) save_chunks(validation_chunks, os.path.join(args.output_directory, "validation")) def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("chunkify_file") parser.add_argument("output_directory") parser.add_argument("--seed", default=25, type=int) parser.add_argument("--chunksize", default=3600, type=int) parser.add_argument("--validation-reads", default=1000, type=int) return parser
bonito-master
bonito/cli/convert.py
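regular_break_points() above centres whole chunks inside a read and drops the remainder at the edges. A tiny worked example with invented numbers:

from bonito.cli.convert import regular_break_points

# a 10,000 sample read cut into 3,600 sample chunks, aligned to the middle of the read
print(regular_break_points(10_000, 3_600))
# [[1400 5000]
#  [5000 8600]]

get_chunks() then uses np.searchsorted on the read's Ref_to_signal pointers to slice the matching reference bases for each signal window.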
bonito-master
bonito/cli/__init__.py
""" Bonito Export """ import os import re import sys import json import torch import bonito import hashlib import numpy as np from glob import glob from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter class JsonEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() elif isinstance(obj, torch.nn.Parameter): return obj.data elif isinstance(obj, torch.Tensor): return obj.detach().numpy() else: return super(JsonEncoder, self).default(obj) def file_md5(filename, nblock=1024): """ Get md5 string from file. """ hasher = hashlib.md5() block_size = nblock * hasher.block_size with open(filename, "rb") as fh: for blk in iter((lambda: fh.read(block_size)), b""): hasher.update(blk) return hasher.hexdigest() def reformat_output_layer(layer_dict): n_base, state_len, blank_score = [layer_dict.pop(k) for k in ['n_base', 'state_len', 'blank_score']] layer_dict['size'] = (n_base + 1) * n_base**state_len layer_dict['type'] = 'GlobalNormTransducer' if blank_score is not None: assert layer_dict['activation'] == 'tanh' params = layer_dict['params'] params['W'] = torch.nn.functional.pad( params['W'].reshape([n_base**state_len, n_base, -1]), (0, 0, 1, 0), value=0. ).reshape((n_base + 1) * n_base**state_len, -1) params['b'] = torch.nn.functional.pad( params['b'].reshape(n_base**state_len, n_base), (1, 0), value=np.arctanh(blank_score / layer_dict['scale']) ).reshape(-1) return layer_dict def to_guppy_dict(model, include_weights=True): guppy_dict = bonito.nn.to_dict(model.encoder, include_weights=include_weights) guppy_dict['sublayers'] = [x for x in guppy_dict['sublayers'] if x['type'] != 'permute'] guppy_dict['sublayers'] = [dict(x, type='LSTM', activation='tanh', gate='sigmoid') if x['type'] == 'lstm' else x for x in guppy_dict['sublayers']] guppy_dict['sublayers'] = [dict(x, padding=(x['padding'], x['padding'])) if x['type'] == 'convolution' else x for x in guppy_dict['sublayers']] guppy_dict['sublayers'] = [{'type': 'reverse', 'sublayers': x} if x.pop('reverse', False) else x for x in guppy_dict['sublayers']] guppy_dict['sublayers'][-1] = reformat_output_layer(guppy_dict['sublayers'][-1]) return guppy_dict def main(args): if not os.path.isdir(args.model): print("[error] file given - please provide a model directory to export.", file=sys.stderr) return 1 model = bonito.util.load_model(args.model, device='cpu') jsn = to_guppy_dict(model) weight_files = glob(os.path.join(args.model, "weights_*.tar")) weights = max([int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files]) jsn["md5sum"] = file_md5(os.path.join(args.model, 'weights_%s.tar' % weights)) json.dump(jsn, sys.stdout, cls=JsonEncoder) def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument('model') return parser
bonito-master
bonito/cli/export.py
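JsonEncoder above lets the exporter dump numpy scalars, arrays and torch tensors straight to JSON. A small sketch with made-up values, not code from the repository:

import json
import numpy as np
import torch
from bonito.cli.export import JsonEncoder

# numpy scalars, numpy arrays and torch tensors are converted to plain JSON types
blob = {'scale': np.float32(5.0), 'b': np.zeros(2), 'W': torch.zeros(2, 2)}
print(json.dumps(blob, cls=JsonEncoder))
# {"scale": 5.0, "b": [0.0, 0.0], "W": [[0.0, 0.0], [0.0, 0.0]]}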
""" Bonito model viewer - display a model architecture for a given config. """ import toml import argparse from bonito.util import load_symbol def main(args): config = toml.load(args.config) Model = load_symbol(config, "Model") model = Model(config) print(model) print("Total parameters in model", sum(p.numel() for p in model.parameters())) def argparser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("config") return parser
bonito-master
bonito/cli/view.py
""" Bonito Basecaller """ import sys import torch import numpy as np from tqdm import tqdm from time import perf_counter from datetime import timedelta from itertools import islice as take from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from bonito.aligner import Aligner from bonito.io import CTCWriter, Writer from bonito.fast5 import get_reads, read_chunks from bonito.multiprocessing import process_cancel from bonito.util import column_to_set, load_symbol, load_model def main(args): if args.save_ctc and not args.reference: sys.stderr.write("> a reference is needed to output ctc training data\n") exit(1) sys.stderr.write("> loading model\n") model = load_model(args.model_directory, args.device, weights=int(args.weights)) if args.reference: sys.stderr.write("> loading reference\n") aligner = Aligner(args.reference, preset='ont-map', best_n=1) if not aligner: sys.stderr.write("> failed to load/build index\n") exit(1) else: aligner = None reads = get_reads( args.reads_directory, n_proc=8, recursive=args.recursive, read_ids=column_to_set(args.read_ids), skip=args.skip, cancel=process_cancel() ) if args.max_reads: reads = take(reads, args.max_reads) basecall = load_symbol(args.model_directory, "basecall") if args.save_ctc: reads = ( chunk for read in reads for chunk in read_chunks(read, chunksize=args.chunksize) ) basecalls = basecall( model, reads, batchsize=64, chunksize=args.chunksize, aligner=aligner, qscores=args.fastq, reverse=args.revcomp, ) writer = CTCWriter( tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, args.ctc_min_coverage, args.ctc_min_accuracy ) else: basecalls = basecall( model, reads, aligner=aligner, reverse=args.revcomp, qscores=args.fastq, batchsize=args.batchsize, chunksize=args.chunksize, ) writer = Writer( tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, fastq=args.fastq ) t0 = perf_counter() writer.start() writer.join() duration = perf_counter() - t0 num_samples = sum(num_samples for read_id, num_samples in writer.log) sys.stderr.write("> completed reads: %s\n" % len(writer.log)) sys.stderr.write("> duration: %s\n" % timedelta(seconds=np.round(duration))) sys.stderr.write("> samples per second %.1E\n" % (num_samples / duration)) sys.stderr.write("> done\n") def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("model_directory") parser.add_argument("reads_directory") parser.add_argument("--reference") parser.add_argument("--read-ids") parser.add_argument("--device", default="cuda") parser.add_argument("--weights", default="0", type=str) parser.add_argument("--skip", action="store_true", default=False) parser.add_argument("--fastq", action="store_true", default=False) parser.add_argument("--save-ctc", action="store_true", default=False) parser.add_argument("--revcomp", action="store_true", default=False) parser.add_argument("--recursive", action="store_true", default=False) parser.add_argument("--ctc-min-coverage", default=0.9, type=float) parser.add_argument("--ctc-min-accuracy", default=0.9, type=float) parser.add_argument("--batchsize", default=32, type=int) parser.add_argument("--chunksize", default=4000, type=int) parser.add_argument("--max-reads", default=0, type=int) return parser
bonito-master
bonito/cli/basecaller.py
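The basecaller can likewise be invoked from Python. The model name and reads path below are placeholders for a downloaded model directory and a directory of fast5 files; note that --save-ctc additionally requires --reference.

from bonito.cli.basecaller import argparser, main

# equivalent to: bonito basecaller <model_directory> <reads_directory> --fastq
args = argparser().parse_args(['[email protected]', '/data/reads', '--fastq'])
main(args)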
""" Bonito Duplex consensus decoding. https://www.biorxiv.org/content/10.1101/2020.02.25.956771v1 """ import os import sys import json from glob import glob from pathlib import Path from os.path import basename from functools import partial from time import perf_counter from datetime import timedelta from multiprocessing import Pool from itertools import islice, groupby from concurrent.futures import ProcessPoolExecutor from multiprocessing import Process, Queue, Lock, cpu_count from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter import spoa import torch import parasail import numpy as np import pandas as pd from tqdm import tqdm from fast_ctc_decode import crf_beam_search, crf_beam_search_duplex from genomeworks import cuda from genomeworks.cudapoa import CudaPoaBatch, status_to_str import bonito from bonito.io import Writer, devnull from bonito.aligner import Aligner, align_map from bonito.util import load_model, half_supported from bonito.crf.basecall import transfer, split_read, stitch from bonito.fast5 import get_raw_data_for_read, get_fast5_file from bonito.util import unbatchify, batchify, chunk, concat, accuracy from bonito.multiprocessing import thread_map, process_map, process_cancel def poagen(groups, gpu_percent=0.8): free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device()) gpu_mem_per_batch = gpu_percent * free max_seq_sz = 0 max_sequences_per_poa = 0 for group in groups: longest_seq = len(max(group, key=len)) max_seq_sz = longest_seq if longest_seq > max_seq_sz else max_seq_sz seq_in_poa = len(group) max_sequences_per_poa = seq_in_poa if seq_in_poa > max_sequences_per_poa else max_sequences_per_poa batch = CudaPoaBatch( max_sequences_per_poa, max_seq_sz, gpu_mem_per_batch, output_type="consensus", cuda_banded_alignment=True, alignment_band_width=256, ) poa_index = 0 initial_count = 0 while poa_index < len(groups): group = groups[poa_index] group_status, seq_status = batch.add_poa_group(group) # If group was added and more space is left in batch, continue onto next group. if group_status == 0: for seq_index, status in enumerate(seq_status): if status != 0: print("Could not add sequence {} to POA {} - error {}".format(seq_index, poa_index, status_to_str(status)), file=sys.stderr) poa_index += 1 # Once batch is full or no groups are left, run POA processing. if ((group_status == 1) or ((group_status == 0) and (poa_index == len(groups)))): batch.generate_poa() consensus, coverage, con_status = batch.get_consensus() for p, status in enumerate(con_status): if status != 0: print("Could not get consensus for POA group {} - {}".format(initial_count + p, status_to_str(status)), file=sys.stderr) yield from consensus initial_count = poa_index batch.reset() # In the case where POA group wasn't processed correctly. elif group_status != 0: print("Could not add POA group {} to batch - {}".format(poa_index, status_to_str(group_status)), file=sys.stderr) poa_index += 1 def get_read(readdir, summary, idx): """ Get a single read from row `idx` in the `summary` dataframe. """ return get_raw_data_for_read( (readdir / summary.iloc[idx].filename_fast5, summary.iloc[idx].read_id) ) def read_gen(directory, summary, n_proc=1, cancel=None): """ Generate reads from the given `directory` listed in the `summary` dataframe. """ with Pool(n_proc) as pool: for read in pool.imap(partial(get_read, Path(directory), summary), range(len(summary))): yield read if cancel is not None and cancel.is_set(): return def get_read_ids(filename): """ Return a dictionary of read_id -> filename mappings. 
""" with get_fast5_file(filename, 'r') as f5: return { read.read_id: basename(filename) for read in f5.get_reads() } def build_index(files, n_proc=1): """ Build an index of read ids to filename mappings """ index = {} with ProcessPoolExecutor(max_workers=n_proc) as pool: for res in tqdm(pool.map(get_read_ids, files), leave=False): index.update(res) return index def build_envelope(len1, seq1, path1, len2, seq2, path2, padding=15): # needleman-wunsch alignment with constant gap penalty. aln = parasail.nw_trace_striped_32(seq2, seq1, 2, 2, parasail.dnafull) # pair up positions alignment = np.column_stack([ np.cumsum([x != '-' for x in aln.traceback.ref]) - 1, np.cumsum([x != '-' for x in aln.traceback.query]) - 1 ]) path_range1 = np.column_stack([path1, path1[1:] + [len1]]) path_range2 = np.column_stack([path2, path2[1:] + [len2]]) envelope = np.full((len1, 2), -1, dtype=int) for idx1, idx2 in alignment.clip(0): st_1, en_1 = path_range1[idx1] st_2, en_2 = path_range2[idx2] for idx in range(st_1, en_1): if st_2 < envelope[idx, 0] or envelope[idx, 0] < 0: envelope[idx, 0] = st_2 if en_2 > envelope[idx, 1] or envelope[idx, 1] < 0: envelope[idx, 1] = en_2 # add a little padding to ensure some overlap envelope[:, 0] = envelope[:, 0] - padding envelope[:, 1] = envelope[:, 1] + padding envelope = np.clip(envelope, 0, len2) prev_end = 0 for i in range(envelope.shape[0]): if envelope[i, 0] > envelope[i, 1]: envelope[i, 0] = 0 if envelope[i, 0] > prev_end: envelope[i, 0] = prev_end prev_end = envelope[i, 1] return envelope.astype(np.uint64) def find_follow_on(df, gap=5, distance=51, cov=0.85, min_len=100): """ Find follow on reads from a sequencing summary file. """ df = df[ df.alignment_coverage.astype('float32').gt(cov) & df.sequence_length_template.astype('int32').gt(min_len) ] df = df.sort_values(['run_id', 'channel', 'mux', 'start_time']) genome_start = np.array(df.alignment_genome_start, dtype=np.int32) genome_end = np.array(df.alignment_genome_end, dtype=np.int32) direction = np.array(df.alignment_direction) start_time = np.array(df.start_time, dtype=np.float32) end_time = np.array(df.start_time + df.duration, dtype=np.float32) channel = np.array(df.channel, dtype=np.int32) mux = np.array(df.mux, dtype=np.int32) filt = ( (channel[1:] == channel[:-1]) & (mux[1:] == mux[:-1]) & (np.abs(genome_start[1:] - genome_start[:-1]) < distance) & (np.abs(genome_end[1:] - genome_end[:-1]) < distance) & (direction[1:] != direction[:-1]) & (start_time[1:] - end_time[:-1] < gap) ) mask = np.full(len(filt) + 1, False) mask[:-1] = mask[:-1] | filt mask[1:] = mask[1:] | filt return df[mask] def compute_scores(model, batch, reverse=False): with torch.no_grad(): device = next(model.parameters()).device dtype = torch.float16 if half_supported() else torch.float32 scores = model.encoder(batch.to(dtype).to(device)) if reverse: scores = model.seqdist.reverse_complement(scores) betas = model.seqdist.backward_scores(scores.to(torch.float32)) trans, init = model.seqdist.compute_transition_probs(scores, betas) return { 'trans': trans.to(dtype).transpose(0, 1), 'init': init.to(dtype).unsqueeze(1), } def basecall(model, reads, chunksize=4000, overlap=500, batchsize=32, reverse=False): reads = ( read_chunk for read in reads for read_chunk in split_read(read, chunksize * batchsize)[::-1 if reverse else 1] ) chunks = ( ((read, start, end), chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap)) for (read, start, end) in reads ) batches = ( (k, compute_scores(model, batch, reverse=reverse)) for k, batch in 
batchify(chunks, batchsize=batchsize) ) stitched = ( (read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse)) for ((read, start, end), x) in unbatchify(batches) ) transferred = thread_map(transfer, stitched, n_thread=1) return ( (read, concat([part for k, part in parts])) for read, parts in groupby(transferred, lambda x: x[0]) ) def beam_search_duplex(seq1, path1, t1, b1, seq2, path2, t2, b2, alphabet='NACGT', beamsize=5, pad=40, T=0.01): env = build_envelope(t1.shape[0], seq1, path1, t2.shape[0], seq2, path2, padding=pad) return crf_beam_search_duplex( t1, b1, t2, b2, alphabet=alphabet, beam_size=beamsize, beam_cut_threshold=T, envelope=env, ) def decode(res, beamsize_1=5, pad_1=40, cut_1=0.01, beamsize_2=5, pad_2=40, cut_2=0.01, match=80, alphabet="NACGT"): temp_probs, init1 = res[0]['trans'].astype(np.float32), res[0]['init'][0].astype(np.float32) comp_probs, init2 = res[1]['trans'].astype(np.float32), res[1]['init'][0].astype(np.float32) simplex1, path1 = crf_beam_search(temp_probs, init1, alphabet, beam_size=5, beam_cut_threshold=0.01) simplex2, path2 = crf_beam_search(comp_probs, init2, alphabet, beam_size=5, beam_cut_threshold=0.01) if len(simplex1) < 10 or len(simplex2) < 10: return [simplex1, simplex2] if accuracy(simplex1, simplex2) < match: return [simplex1, simplex2] duplex1 = beam_search_duplex( simplex1, path1, temp_probs, init1, simplex2, path2, comp_probs, init2, pad=pad_1, beamsize=5, T=cut_1 ) duplex2 = beam_search_duplex( simplex2, path2, comp_probs, init2, simplex1, path1, temp_probs, init1, pad=pad_2, beamsize=5, T=cut_2 ) return [duplex1, duplex2, simplex1, simplex2] def poa(seqs, allseq=False): con, msa = spoa.poa(seqs, genmsa=False) if allseq: return (con, *seqs) return (con, ) def call(model, reads_directory, templates, complements, aligner=None, cudapoa=True): temp_reads = read_gen(reads_directory, templates, n_proc=8, cancel=process_cancel()) comp_reads = read_gen(reads_directory, complements, n_proc=8, cancel=process_cancel()) temp_scores = basecall(model, temp_reads, reverse=False) comp_scores = basecall(model, comp_reads, reverse=True) scores = (((r1, r2), (s1, s2)) for (r1, s1), (r2, s2) in zip(temp_scores, comp_scores)) calls = thread_map(decode, scores, n_thread=12) if cudapoa: sequences = ((reads, [seqs, ]) for reads, seqs in calls if len(seqs) > 2) consensus = (zip(reads, poagen(calls)) for reads, calls in batchify(sequences, 100)) res = ((reads[0], {'sequence': seq}) for seqs in consensus for reads, seq in seqs) else: sequences = ((reads, seqs) for reads, seqs in calls if len(seqs) > 2) consensus = process_map(poa, sequences, n_proc=4) res = ((reads, {'sequence': seq}) for reads, seqs in consensus for seq in seqs) if aligner is None: return res return align_map(aligner, res) def main(args): sys.stderr.write("> loading model\n") model = load_model(args.model, args.device) if args.reference: sys.stderr.write("> loading reference\n") aligner = Aligner(args.reference, preset='ont-map') if not aligner: sys.stderr.write("> failed to load/build index\n") exit(1) else: aligner = None if args.summary: sys.stderr.write("> finding follow on strands\n") pairs = pd.read_csv(args.summary, '\t', low_memory=False) pairs = pairs[pairs.sequence_length_template.gt(0)] if 'filename' in pairs.columns: pairs = pairs.rename(columns={'filename': 'filename_fast5'}) if 'alignment_strand_coverage' in pairs.columns: pairs = pairs.rename(columns={'alignment_strand_coverage': 'alignment_coverage'}) valid_fast5s = [ f for f in pairs.filename_fast5.unique() if 
((args.reads_directory / Path(f)).exists()) ] pairs = pairs[pairs.filename_fast5.isin(valid_fast5s)] pairs = find_follow_on(pairs) sys.stderr.write("> found %s follow strands in summary\n" % (len(pairs) // 2)) if args.max_reads > 0: pairs = pairs.head(args.max_reads) temp_reads = pairs.iloc[0::2] comp_reads = pairs.iloc[1::2] else: if args.index is not None: sys.stderr.write("> loading read index\n") index = json.load(open(args.index, 'r')) else: sys.stderr.write("> building read index\n") files = list(glob(os.path.join(args.reads_directory, '*.fast5'))) index = build_index(files, n_proc=8) if args.save_index: with open('bonito-read-id.idx', 'w') as f: json.dump(index, f) pairs = pd.read_csv(args.pairs, sep=args.sep, names=['read_1', 'read_2']) if args.max_reads > 0: pairs = pairs.head(args.max_reads) pairs['file_1'] = pairs['read_1'].apply(index.get) pairs['file_2'] = pairs['read_2'].apply(index.get) pairs = pairs.dropna().reset_index() temp_reads = pairs[['read_1', 'file_1']].rename( columns={'read_1': 'read_id', 'file_1': 'filename_fast5'} ) comp_reads = pairs[['read_2', 'file_2']].rename( columns={'read_2': 'read_id', 'file_2': 'filename_fast5'} ) if len(pairs) == 0: print("> no matched pairs found in given directory", file=sys.stderr) exit(1) # https://github.com/clara-parabricks/GenomeWorks/issues/648 with devnull(): CudaPoaBatch(1000, 1000, 3724032) basecalls = call(model, args.reads_directory, temp_reads, comp_reads, aligner=aligner) writer = Writer(tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, duplex=True) t0 = perf_counter() writer.start() writer.join() duration = perf_counter() - t0 num_samples = sum(num_samples for read_id, num_samples in writer.log) print("> duration: %s" % timedelta(seconds=np.round(duration)), file=sys.stderr) print("> samples per second %.1E" % (num_samples / duration), file=sys.stderr) def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("model") parser.add_argument("reads_directory") group = parser.add_mutually_exclusive_group() group.add_argument("--summary", default=None) group.add_argument("--pairs", default=None) parser.add_argument("--sep", default=' ') parser.add_argument("--index", default=None) parser.add_argument("--save-index", action="store_true", default=False) parser.add_argument("--reference") parser.add_argument("--device", default="cuda") parser.add_argument("--max-reads", default=0, type=int) return parser
bonito-master
bonito/cli/duplex.py
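A small illustration of the alignment-column bookkeeping that build_envelope relies on above: np.cumsum over the non-gap mask of each traceback string maps every alignment column back to a position in the original sequence. The toy strings below are invented for the example and stand in for aln.traceback.ref / aln.traceback.query; only numpy is needed.

import numpy as np

# hypothetical aligned strings standing in for the parasail traceback
ref   = "AC-GT"
query = "ACTG-"

alignment = np.column_stack([
    np.cumsum([c != '-' for c in ref]) - 1,    # index into ref for each column
    np.cumsum([c != '-' for c in query]) - 1,  # index into query for each column
])
print(alignment)
# [[0 0]
#  [1 1]
#  [1 2]
#  [2 3]
#  [3 3]]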
#!/usr/bin/env python3 """ Bonito training. """ import os from argparse import ArgumentParser from argparse import ArgumentDefaultsHelpFormatter from bonito.util import __models__, default_config, default_data from bonito.util import load_data, load_model, load_symbol, init, half_supported from bonito.training import ChunkDataSet, load_state, Trainer import toml import torch import numpy as np from torch.optim import AdamW from torch.utils.data import DataLoader def main(args): workdir = os.path.expanduser(args.training_directory) if os.path.exists(workdir) and not args.force: print("[error] %s exists, use -f to force continue training." % workdir) exit(1) init(args.seed, args.device) device = torch.device(args.device) print("[loading data]") train_data = load_data(limit=args.chunks, directory=args.directory) if os.path.exists(os.path.join(args.directory, 'validation')): valid_data = load_data(directory=os.path.join(args.directory, 'validation')) else: print("[validation set not found: splitting training set]") split = np.floor(len(train_data[0]) * 0.97).astype(np.int32) valid_data = [x[split:] for x in train_data] train_data = [x[:split] for x in train_data] train_loader = DataLoader(ChunkDataSet(*train_data), batch_size=args.batch, shuffle=True, num_workers=4, pin_memory=True) valid_loader = DataLoader(ChunkDataSet(*valid_data), batch_size=args.batch, num_workers=4, pin_memory=True) if args.pretrained: dirname = args.pretrained if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)): dirname = os.path.join(__models__, dirname) config_file = os.path.join(dirname, 'config.toml') else: config_file = args.config config = toml.load(config_file) argsdict = dict(training=vars(args)) os.makedirs(workdir, exist_ok=True) toml.dump({**config, **argsdict}, open(os.path.join(workdir, 'config.toml'), 'w')) print("[loading model]") if args.pretrained: print("[using pretrained model {}]".format(args.pretrained)) model = load_model(args.pretrained, device, half=False) else: model = load_symbol(config, 'Model')(config) last_epoch = load_state(workdir, args.device, model) if args.multi_gpu: from torch.nn import DataParallel model = DataParallel(model) model.decode = model.module.decode model.alphabet = model.module.alphabet trainer = Trainer(model, device, train_loader, valid_loader, use_amp=half_supported() and not args.no_amp) trainer.fit(workdir, args.epochs, args.lr, last_epoch=last_epoch) def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("training_directory") group = parser.add_mutually_exclusive_group() group.add_argument('--config', default=default_config) group.add_argument('--pretrained', default="") parser.add_argument("--directory", default=default_data) parser.add_argument("--device", default="cuda") parser.add_argument("--lr", default=2e-3, type=float) parser.add_argument("--seed", default=25, type=int) parser.add_argument("--epochs", default=5, type=int) parser.add_argument("--batch", default=64, type=int) parser.add_argument("--chunks", default=0, type=int) parser.add_argument("--no-amp", action="store_true", default=False) parser.add_argument("--multi-gpu", action="store_true", default=False) parser.add_argument("-f", "--force", action="store_true", default=False) return parser
bonito-master
bonito/cli/train.py
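A sketch of driving the training entry point above programmatically instead of through the bonito CLI. The paths and the tiny epoch/batch settings are placeholders, and it assumes bonito and a local chunk dataset are available; treat it as an illustration of the argparser wiring, not a recommended workflow.

from bonito.cli.train import argparser, main

# hypothetical arguments; training_directory is created by main() via os.makedirs
args = argparser().parse_args([
    "/tmp/bonito-run",              # training_directory (placeholder)
    "--directory", "/data/chunks",  # placeholder training data directory
    "--epochs", "1",
    "--batch", "32",
    "--device", "cpu",
    "--no-amp",
])
main(args)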
""" Bonito model evaluator """ import os import time import torch import numpy as np from itertools import starmap from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from bonito.training import ChunkDataSet from bonito.util import accuracy, poa, decode_ref, half_supported from bonito.util import init, load_data, load_model, concat, permute from torch.utils.data import DataLoader def main(args): poas = [] init(args.seed, args.device) print("* loading data") directory = args.directory if os.path.exists(os.path.join(directory, 'validation')): directory = os.path.join(directory, 'validation') testdata = ChunkDataSet( *load_data( limit=args.chunks, directory=directory ) ) dataloader = DataLoader(testdata, batch_size=args.batchsize) accuracy_with_cov = lambda ref, seq: accuracy(ref, seq, min_coverage=args.min_coverage) for w in [int(i) for i in args.weights.split(',')]: seqs = [] print("* loading model", w) model = load_model(args.model_directory, args.device, weights=w) print("* calling") t0 = time.perf_counter() with torch.no_grad(): for data, *_ in dataloader: if half_supported(): data = data.type(torch.float16).to(args.device) else: data = data.to(args.device) log_probs = model(data) if hasattr(model, 'decode_batch'): seqs.extend(model.decode_batch(log_probs)) else: seqs.extend([model.decode(p) for p in permute(log_probs, 'TNC', 'NTC')]) duration = time.perf_counter() - t0 refs = [decode_ref(target, model.alphabet) for target in dataloader.dataset.targets] accuracies = [accuracy_with_cov(ref, seq) if len(seq) else 0. for ref, seq in zip(refs, seqs)] if args.poa: poas.append(sequences) print("* mean %.2f%%" % np.mean(accuracies)) print("* median %.2f%%" % np.median(accuracies)) print("* time %.2f" % duration) print("* samples/s %.2E" % (args.chunks * data.shape[2] / duration)) if args.poa: print("* doing poa") t0 = time.perf_counter() # group each sequence prediction per model together poas = [list(seq) for seq in zip(*poas)] consensuses = poa(poas) duration = time.perf_counter() - t0 accuracies = list(starmap(accuracy_with_coverage_filter, zip(references, consensuses))) print("* mean %.2f%%" % np.mean(accuracies)) print("* median %.2f%%" % np.median(accuracies)) print("* time %.2f" % duration) def argparser(): parser = ArgumentParser( formatter_class=ArgumentDefaultsHelpFormatter, add_help=False ) parser.add_argument("model_directory") parser.add_argument("--directory", default=None) parser.add_argument("--device", default="cuda") parser.add_argument("--seed", default=9, type=int) parser.add_argument("--weights", default="0", type=str) parser.add_argument("--chunks", default=1000, type=int) parser.add_argument("--batchsize", default=96, type=int) parser.add_argument("--beamsize", default=5, type=int) parser.add_argument("--poa", action="store_true", default=False) parser.add_argument("--min-coverage", default=0.5, type=float) return parser
bonito-master
bonito/cli/evaluate.py
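The coverage-filtered metric in the evaluator is just bonito.util.accuracy with a min_coverage cutoff. A minimal standalone check might look like the following; the sequences are invented and the exact percentage depends on bonito's alignment settings.

from bonito.util import accuracy

ref = "ACGTACGTACGTACGT"
seq = "ACGTACGAACGTACGT"   # single mismatch

print(accuracy(ref, seq))                     # percentage identity
print(accuracy(ref, seq, min_coverage=0.5))   # same call as accuracy_with_cov above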
from .model import Model from .basecall import basecall
bonito-master
bonito/crf/__init__.py
""" Bonito CTC-CRF Model. """ import torch import numpy as np from bonito.nn import Module, Convolution, SHABlock, LinearCRFEncoder, Serial, Permute, layers, from_dict import seqdist.sparse from seqdist.ctc_simple import logZ_cupy, viterbi_alignments from seqdist.core import SequenceDist, Max, Log, semiring def get_stride(m): if hasattr(m, 'stride'): return m.stride if isinstance(m.stride, int) else m.stride[0] if isinstance(m, Convolution): return get_stride(m.conv) if isinstance(m, Serial): return int(np.prod([get_stride(x) for x in m])) return 1 class CTC_CRF(SequenceDist): def __init__(self, state_len, alphabet): super().__init__() self.alphabet = alphabet self.state_len = state_len self.n_base = len(alphabet[1:]) self.idx = torch.cat([ torch.arange(self.n_base**(self.state_len))[:, None], torch.arange( self.n_base**(self.state_len) ).repeat_interleave(self.n_base).reshape(self.n_base, -1).T ], dim=1).to(torch.int32) def n_score(self): return len(self.alphabet) * self.n_base**(self.state_len) def logZ(self, scores, S:semiring=Log): T, N, _ = scores.shape Ms = scores.reshape(T, N, -1, len(self.alphabet)) alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one) beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one) return seqdist.sparse.logZ(Ms, self.idx, alpha_0, beta_T, S) def normalise(self, scores): return (scores - self.logZ(scores)[:, None] / len(scores)) def forward_scores(self, scores, S: semiring=Log): T, N, _ = scores.shape Ms = scores.reshape(T, N, -1, self.n_base + 1) alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one) return seqdist.sparse.fwd_scores_cupy(Ms, self.idx, alpha_0, S, K=1) def backward_scores(self, scores, S: semiring=Log): T, N, _ = scores.shape Ms = scores.reshape(T, N, -1, self.n_base + 1) beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one) return seqdist.sparse.bwd_scores_cupy(Ms, self.idx, beta_T, S, K=1) def compute_transition_probs(self, scores, betas): T, N, C = scores.shape # add bwd scores to edge scores log_trans_probs = (scores.reshape(T, N, -1, self.n_base + 1) + betas[1:, :, :, None]) # transpose from (new_state, dropped_base) to (old_state, emitted_base) layout log_trans_probs = torch.cat([ log_trans_probs[:, :, :, [0]], log_trans_probs[:, :, :, 1:].transpose(3, 2).reshape(T, N, -1, self.n_base) ], dim=-1) # convert from log probs to probs by exponentiating and normalising trans_probs = torch.softmax(log_trans_probs, dim=-1) #convert first bwd score to initial state probabilities init_state_probs = torch.softmax(betas[0], dim=-1) return trans_probs, init_state_probs def reverse_complement(self, scores): T, N, C = scores.shape expand_dims = T, N, *(self.n_base for _ in range(self.state_len)), self.n_base + 1 scores = scores.reshape(*expand_dims) blanks = torch.flip(scores[..., 0].permute( 0, 1, *range(self.state_len + 1, 1, -1)).reshape(T, N, -1, 1), [0, 2] ) emissions = torch.flip(scores[..., 1:].permute( 0, 1, *range(self.state_len, 1, -1), self.state_len +2, self.state_len + 1).reshape(T, N, -1, self.n_base), [0, 2, 3] ) return torch.cat([blanks, emissions], dim=-1).reshape(T, N, -1) def viterbi(self, scores): traceback = self.posteriors(scores, Max) paths = traceback.argmax(2) % len(self.alphabet) return paths def path_to_str(self, path): alphabet = np.frombuffer(''.join(self.alphabet).encode(), dtype='u1') seq = alphabet[path[path != 0]] return seq.tobytes().decode() def prepare_ctc_scores(self, scores, targets): # convert from CTC targets (with blank=0) to zero indexed targets = torch.clamp(targets - 
1, 0) T, N, C = scores.shape scores = scores.to(torch.float32) n = targets.size(1) - (self.state_len - 1) stay_indices = sum( targets[:, i:n + i] * self.n_base ** (self.state_len - i - 1) for i in range(self.state_len) ) * len(self.alphabet) move_indices = stay_indices[:, 1:] + targets[:, :n - 1] + 1 stay_scores = scores.gather(2, stay_indices.expand(T, -1, -1)) move_scores = scores.gather(2, move_indices.expand(T, -1, -1)) return stay_scores, move_scores def ctc_loss(self, scores, targets, target_lengths, loss_clip=None, reduction='mean', normalise_scores=True): if normalise_scores: scores = self.normalise(scores) stay_scores, move_scores = self.prepare_ctc_scores(scores, targets) logz = logZ_cupy(stay_scores, move_scores, target_lengths + 1 - self.state_len) loss = - (logz / target_lengths) if loss_clip: loss = torch.clamp(loss, 0.0, loss_clip) if reduction == 'mean': return loss.mean() elif reduction in ('none', None): return loss else: raise ValueError('Unknown reduction type {}'.format(reduction)) def ctc_viterbi_alignments(self, scores, targets, target_lengths): stay_scores, move_scores = self.prepare_ctc_scores(scores, targets) return viterbi_alignments(stay_scores, move_scores, target_lengths + 1 - self.state_len) def conv(c_in, c_out, ks, stride=1, bias=False, activation=None): return Convolution(c_in, c_out, ks, stride=stride, padding=ks//2, bias=bias, activation=activation) def rnn_encoder(n_base, state_len, insize=1, stride=5, winlen=19, activation='swish', rnn_type='lstm', features=768, scale=5.0, blank_score=None, single_head_attn=False): rnn = layers[rnn_type] return Serial([ conv(insize, 4, ks=5, bias=True, activation=activation), conv(4, 16, ks=5, bias=True, activation=activation), conv(16, features, ks=winlen, stride=stride, bias=True, activation=activation), Permute([2, 0, 1]), rnn(features, features, reverse=True), rnn(features, features), rnn(features, features, reverse=True), rnn(features, features), *([SHABlock(features)] if single_head_attn else []), rnn(features, features, reverse=True), LinearCRFEncoder(features, n_base, state_len, bias=True, activation='tanh', scale=scale, blank_score=blank_score) ]) class SeqdistModel(Module): def __init__(self, encoder, seqdist): super().__init__() self.seqdist = seqdist self.encoder = encoder self.stride = get_stride(encoder) self.alphabet = seqdist.alphabet def forward(self, x): return self.encoder(x).to(torch.float32) def decode_batch(self, x): scores = self.seqdist.posteriors(x.to(torch.float32)) + 1e-8 tracebacks = self.seqdist.viterbi(scores.log()).to(torch.int16).T return [self.seqdist.path_to_str(x) for x in tracebacks.cpu().numpy()] def decode(self, x): return self.decode_batch(x.unsqueeze(1))[0] class Model(SeqdistModel): def __init__(self, config): seqdist = CTC_CRF( state_len=config['global_norm']['state_len'], alphabet=config['labels']['labels'] ) if 'type' in config['encoder']: #new-style config encoder = from_dict(config['encoder']) else: #old-style encoder = rnn_encoder(seqdist.n_base, seqdist.state_len, insize=config['input']['features'], **config['encoder']) super().__init__(encoder, seqdist) self.config = config
bonito-master
bonito/crf/model.py
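Back-of-envelope check of the score-tensor width implied by CTC_CRF above: the encoder emits len(alphabet) * n_base ** state_len scores per time step, which logZ/forward_scores then reshape to (T, N, n_states, n_base + 1). The concrete state_len below is only an illustrative value, not necessarily a shipped configuration.

n_base = 4                            # A, C, G, T
state_len = 5                         # illustrative k-mer state length
alphabet = ['N', 'A', 'C', 'G', 'T']

n_states = n_base ** state_len        # number of CRF hidden states
n_scores = len(alphabet) * n_states   # matches CTC_CRF.n_score()
print(n_states, n_scores)             # 1024 5120

# a (T, N, n_scores) score tensor reshapes cleanly to (T, N, n_states, n_base + 1)
assert n_scores == n_states * (n_base + 1)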
""" Bonito CRF basecall """ import torch import numpy as np from kbeam import beamsearch from itertools import groupby from functools import partial from operator import itemgetter import bonito from bonito.io import Writer from bonito.fast5 import get_reads from bonito.aligner import align_map from bonito.multiprocessing import thread_map, thread_iter from bonito.util import concat, chunk, batchify, unbatchify, half_supported def stitch(chunks, chunksize, overlap, length, stride, reverse=False): """ Stitch chunks together with a given overlap """ if isinstance(chunks, dict): return { k: stitch(v, chunksize, overlap, length, stride, reverse=reverse) for k, v in chunks.items() } return bonito.util.stitch(chunks, chunksize, overlap, length, stride, reverse=reverse) def compute_scores(model, batch, reverse=False): """ Compute scores for model. """ with torch.no_grad(): device = next(model.parameters()).device dtype = torch.float16 if half_supported() else torch.float32 scores = model(batch.to(dtype).to(device)) if reverse: scores = model.seqdist.reverse_complement(scores) betas = model.seqdist.backward_scores(scores.to(torch.float32)) betas -= (betas.max(2, keepdim=True)[0] - 5.0) return { 'scores': scores.transpose(0, 1), 'betas': betas.transpose(0, 1), } def quantise_int8(x, scale=127/5): """ Quantise scores to int8. """ scores = x['scores'] scores *= scale scores = torch.round(scores).to(torch.int8).detach() betas = x['betas'] betas *= scale betas = torch.round(torch.clamp(betas, -127., 128.)).to(torch.int8).detach() return {'scores': scores, 'betas': betas} def transfer(x): """ Device to host transfer using pinned memory. """ torch.cuda.synchronize() with torch.cuda.stream(torch.cuda.Stream()): return { k: torch.empty(v.shape, pin_memory=True, dtype=v.dtype).copy_(v).numpy() for k, v in x.items() } def decode_int8(scores, seqdist, scale=127/5, beamsize=40, beamcut=100.0): """ Beamsearch decode. """ path, _ = beamsearch( scores['scores'], scale, seqdist.n_base, beamsize, guide=scores['betas'], beam_cut=beamcut ) try: return seqdist.path_to_str(path % 4 + 1) except IndexError: return "" def split_read(read, split_read_length=400000): """ Split large reads into manageable pieces. """ if len(read.signal) <= split_read_length: return [(read, 0, len(read.signal))] breaks = np.arange(0, len(read.signal) + split_read_length, split_read_length) return [(read, start, min(end, len(read.signal))) for (start, end) in zip(breaks[:-1], breaks[1:])] def basecall(model, reads, aligner=None, beamsize=40, chunksize=4000, overlap=500, batchsize=32, qscores=False, reverse=False): """ Basecalls a set of reads. 
""" _decode = partial(decode_int8, seqdist=model.seqdist, beamsize=beamsize) reads = (read_chunk for read in reads for read_chunk in split_read(read)[::-1 if reverse else 1]) chunks = ( ((read, start, end), chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap)) for (read, start, end) in reads ) batches = ( (k, quantise_int8(compute_scores(model, batch, reverse=reverse))) for k, batch in thread_iter(batchify(chunks, batchsize=batchsize)) ) stitched = ( (read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse)) for ((read, start, end), x) in unbatchify(batches) ) transferred = thread_map(transfer, stitched, n_thread=1) basecalls = thread_map(_decode, transferred, n_thread=8) basecalls = ( (read, ''.join(seq for k, seq in parts)) for read, parts in groupby(basecalls, lambda x: (x[0].parent if hasattr(x[0], 'parent') else x[0])) ) basecalls = ( (read, {'sequence': seq, 'qstring': '?' * len(seq) if qscores else '*', 'mean_qscore': 0.0}) for read, seq in basecalls ) if aligner: return align_map(aligner, basecalls) return basecalls
bonito-master
bonito/crf/basecall.py
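Minimal sketch of split_read from the module above, using a stand-in object with only a .signal attribute (the one field the function touches); the signal length is arbitrary.

import numpy as np
from types import SimpleNamespace
from bonito.crf.basecall import split_read

read = SimpleNamespace(signal=np.zeros(1_000_000, dtype=np.float32))
pieces = split_read(read, split_read_length=400_000)
print([(start, end) for _, start, end in pieces])
# [(0, 400000), (400000, 800000), (800000, 1000000)]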
from .model import Model from .basecall import basecall
bonito-master
bonito/ctc/__init__.py
""" Bonito Model template """ import numpy as np from bonito.nn import Permute, layers import torch from torch.nn.functional import log_softmax, ctc_loss from torch.nn import Module, ModuleList, Sequential, Conv1d, BatchNorm1d, Dropout from fast_ctc_decode import beam_search, viterbi_search class Model(Module): """ Model template for QuartzNet style architectures https://arxiv.org/pdf/1910.10261.pdf """ def __init__(self, config): super(Model, self).__init__() if 'qscore' not in config: self.qbias = 0.0 self.qscale = 1.0 else: self.qbias = config['qscore']['bias'] self.qscale = config['qscore']['scale'] self.config = config self.stride = config['block'][0]['stride'][0] self.alphabet = config['labels']['labels'] self.features = config['block'][-1]['filters'] self.encoder = Encoder(config) self.decoder = Decoder(self.features, len(self.alphabet)) def forward(self, x): encoded = self.encoder(x) return self.decoder(encoded) def decode(self, x, beamsize=5, threshold=1e-3, qscores=False, return_path=False): x = x.exp().cpu().numpy().astype(np.float32) if beamsize == 1 or qscores: seq, path = viterbi_search(x, self.alphabet, qscores, self.qscale, self.qbias) else: seq, path = beam_search(x, self.alphabet, beamsize, threshold) if return_path: return seq, path return seq def ctc_label_smoothing_loss(self, log_probs, targets, lengths, weights=None): T, N, C = log_probs.shape weights = weights or torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)]) log_probs_lengths = torch.full(size=(N, ), fill_value=T, dtype=torch.int64) loss = ctc_loss(log_probs.to(torch.float32), targets, log_probs_lengths, lengths, reduction='mean') label_smoothing_loss = -((log_probs * weights.to(log_probs.device)).mean()) return {'loss': loss + label_smoothing_loss, 'ctc_loss': loss, 'label_smooth_loss': label_smoothing_loss} class Encoder(Module): """ Builds the model encoder """ def __init__(self, config): super(Encoder, self).__init__() self.config = config features = self.config['input']['features'] activation = layers[self.config['encoder']['activation']]() encoder_layers = [] for layer in self.config['block']: encoder_layers.append( Block( features, layer['filters'], activation, repeat=layer['repeat'], kernel_size=layer['kernel'], stride=layer['stride'], dilation=layer['dilation'], dropout=layer['dropout'], residual=layer['residual'], separable=layer['separable'], ) ) features = layer['filters'] self.encoder = Sequential(*encoder_layers) def forward(self, x): return self.encoder(x) class TCSConv1d(Module): """ Time-Channel Separable 1D Convolution """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, separable=False): super(TCSConv1d, self).__init__() self.separable = separable if separable: self.depthwise = Conv1d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ) self.pointwise = Conv1d( in_channels, out_channels, kernel_size=1, stride=1, dilation=dilation, bias=bias, padding=0 ) else: self.conv = Conv1d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias ) def forward(self, x): if self.separable: x = self.depthwise(x) x = self.pointwise(x) else: x = self.conv(x) return x class Block(Module): """ TCSConv, Batch Normalisation, Activation, Dropout """ def __init__(self, in_channels, out_channels, activation, repeat=5, kernel_size=1, stride=1, dilation=1, dropout=0.0, residual=False, 
separable=False): super(Block, self).__init__() self.use_res = residual self.conv = ModuleList() _in_channels = in_channels padding = self.get_padding(kernel_size[0], stride[0], dilation[0]) # add the first n - 1 convolutions + activation for _ in range(repeat - 1): self.conv.extend( self.get_tcs( _in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding, separable=separable ) ) self.conv.extend(self.get_activation(activation, dropout)) _in_channels = out_channels # add the last conv and batch norm self.conv.extend( self.get_tcs( _in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding, separable=separable ) ) # add the residual connection if self.use_res: self.residual = Sequential(*self.get_tcs(in_channels, out_channels)) # add the activation and dropout self.activation = Sequential(*self.get_activation(activation, dropout)) def get_activation(self, activation, dropout): return activation, Dropout(p=dropout) def get_padding(self, kernel_size, stride, dilation): if stride > 1 and dilation > 1: raise ValueError("Dilation and stride can not both be greater than 1") return (kernel_size // 2) * dilation def get_tcs(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, padding=0, bias=False, separable=False): return [ TCSConv1d( in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, separable=separable ), BatchNorm1d(out_channels, eps=1e-3, momentum=0.1) ] def forward(self, x): _x = x for layer in self.conv: _x = layer(_x) if self.use_res: _x = _x + self.residual(x) return self.activation(_x) class Decoder(Module): """ Decoder """ def __init__(self, features, classes): super(Decoder, self).__init__() self.layers = Sequential( Conv1d(features, classes, kernel_size=1, bias=True), Permute([2, 0, 1]) ) def forward(self, x): return log_softmax(self.layers(x), dim=-1)
bonito-master
bonito/ctc/model.py
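The (kernel_size // 2) * dilation rule in Block.get_padding is the usual "same"-length padding for odd kernels at stride 1; a quick torch check with arbitrarily chosen sizes:

import torch
from torch.nn import Conv1d

k, d = 19, 1
pad = (k // 2) * d
x = torch.randn(1, 16, 1000)
y = Conv1d(16, 16, kernel_size=k, padding=pad, dilation=d)(x)
print(x.shape, y.shape)   # both torch.Size([1, 16, 1000])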
""" Bonito basecall """ import torch import numpy as np from functools import partial from bonito.fast5 import ReadChunk from bonito.aligner import align_map from bonito.multiprocessing import process_map, thread_map from bonito.util import mean_qscore_from_qstring, half_supported from bonito.util import chunk, stitch, batchify, unbatchify, permute, concat def basecall(model, reads, aligner=None, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None): """ Basecalls a set of reads. """ chunks = ( (read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads ) scores = unbatchify( (k, compute_scores(model, v)) for k, v in batchify(chunks, batchsize) ) scores = ( (read, {'scores': stitch(v, chunksize, overlap, len(read.signal), model.stride)}) for read, v in scores ) decoder = partial(decode, decode=model.decode, beamsize=beamsize, qscores=qscores) basecalls = process_map(decoder, scores, n_proc=4) if aligner: return align_map(aligner, basecalls) return basecalls def compute_scores(model, batch): """ Compute scores for model. """ with torch.no_grad(): device = next(model.parameters()).device chunks = batch.to(torch.half).to(device) probs = permute(model(chunks), 'TNC', 'NTC') return probs.cpu().to(torch.float32) def decode(scores, decode, beamsize=5, qscores=False): """ Convert the network scores into a sequence. """ # do a greedy decode to get a sensible qstring to compute the mean qscore from seq, path = decode(scores['scores'], beamsize=1, qscores=True, return_path=True) seq, qstring = seq[:len(path)], seq[len(path):] mean_qscore = mean_qscore_from_qstring(qstring) # beam search will produce a better sequence but doesn't produce a sensible qstring/path if not (qscores or beamsize == 1): try: seq = decode(scores['scores'], beamsize=beamsize) path = None qstring = '*' except: pass return {'sequence': seq, 'qstring': qstring, 'mean_qscore': mean_qscore, 'path': path}
bonito-master
bonito/ctc/basecall.py
# Always prefer setuptools over distutils # To use a consistent encoding from codecs import open import os from os import path from setuptools import setup here = path.abspath(path.dirname(__file__)) # get keops version with open(os.path.join(here, "pykeops", "keops_version"), encoding="utf-8") as v: current_version = v.read().rstrip() # Get the long description from the README file with open(path.join(here, "pykeops", "readme.md"), encoding="utf-8") as f: long_description = f.read() # package setup setup( name="pykeops", version=current_version, description="Python bindings of KeOps: KErnel OPerationS, on CPUs and GPUs, with autodiff and without memory overflows", # Required long_description=long_description, long_description_content_type="text/markdown", url="http://www.kernel-operations.io/", project_urls={ "Bug Reports": "https://github.com/getkeops/keops/issues", "Source": "https://github.com/getkeops/keops", }, author="B. Charlier, J. Feydy, J. Glaunes", author_email="[email protected], [email protected], [email protected]", python_requires=">=3", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Topic :: Scientific/Engineering", "License :: OSI Approved :: MIT License", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Programming Language :: C++", "Programming Language :: Python :: 3 :: Only", ], keywords="kernels gpu autodiff", packages=[ "pykeops", "pykeops.common", "pykeops.common.keops_io", "pykeops.numpy", "pykeops.numpy.cluster", "pykeops.numpy.generic", "pykeops.numpy.lazytensor", "pykeops.test", "pykeops.torch", "pykeops.torch.cluster", "pykeops.torch.generic", "pykeops.torch.lazytensor", ], package_data={ "pykeops": [ "readme.md", "licence.txt", "keops_version", "common/keops_io/pykeops_nvrtc.cpp", ], }, install_requires=["numpy", "pybind11", "keopscore"], extras_require={ "full": [ "sphinx", "sphinx-gallery", "recommonmark", "sphinxcontrib-httpdomain", "sphinx_rtd_theme", "breathe", "matplotlib", "imageio", "torch", "gpytorch", "scikit-learn", "multiprocess", "faiss", "h5py", "jaxlib", "jax", ], "test:": ["pytest", "numpy", "torch"], }, )
keops-main
pykeops/setup.py
import importlib.util import sysconfig from os.path import join, dirname, realpath ############################################################### # Initialize some variables: the values may be redefined later numpy_found = importlib.util.find_spec("numpy") is not None torch_found = importlib.util.find_spec("torch") is not None from keopscore.config.config import use_cuda as gpu_available from keopscore.config.config import get_build_folder def pykeops_nvrtc_name(type="src"): basename = "pykeops_nvrtc" extension = ".cpp" if type == "src" else sysconfig.get_config_var("EXT_SUFFIX") return join( join(dirname(realpath(__file__)), "common", "keops_io") if type == "src" else get_build_folder(), basename + extension, ) def pykeops_cpp_name(tag="", extension=""): basename = "pykeops_cpp_" return join( get_build_folder(), basename + tag + extension, ) python_includes = "$(python3 -m pybind11 --includes)"
keops-main
pykeops/pykeops/config.py
import os import keopscore import keopscore.config import keopscore.config.config from keopscore.config.config import get_build_folder as keops_get_build_folder from . import config as pykeopsconfig ########################################################### # Verbosity level verbose = True if os.getenv("PYKEOPS_VERBOSE") == "0": verbose = False os.environ["KEOPS_VERBOSE"] = "0" def set_verbose(val): global verbose verbose = val keopscore.verbose = val ########################################################### # Set version with open( os.path.join(os.path.abspath(os.path.dirname(__file__)), "keops_version"), encoding="utf-8", ) as v: __version__ = v.read().rstrip() ########################################################### # Utils default_device_id = 0 # default Gpu device number if keopscore.config.config.use_cuda: if not os.path.exists(pykeopsconfig.pykeops_nvrtc_name(type="target")): from .common.keops_io.LoadKeOps_nvrtc import compile_jit_binary compile_jit_binary() def clean_pykeops(recompile_jit_binaries=True): import pykeops keopscore.clean_keops(recompile_jit_binary=recompile_jit_binaries) keops_binder = pykeops.common.keops_io.keops_binder for key in keops_binder: keops_binder[key].reset() if recompile_jit_binaries and keopscore.config.config.use_cuda: pykeops.common.keops_io.LoadKeOps_nvrtc.compile_jit_binary() def set_build_folder(path=None): import pykeops keopscore.set_build_folder(path) keops_binder = pykeops.common.keops_io.keops_binder for key in keops_binder: keops_binder[key].reset(new_save_folder=get_build_folder()) if keopscore.config.config.use_cuda and not os.path.exists( pykeops.config.pykeops_nvrtc_name(type="target") ): pykeops.common.keops_io.LoadKeOps_nvrtc.compile_jit_binary() def get_build_folder(): return keops_get_build_folder() if pykeopsconfig.numpy_found: from .numpy.test_install import test_numpy_bindings if pykeopsconfig.torch_found: from .torch.test_install import test_torch_bindings # next line is to ensure that cache file for formulas is loaded at import from .common import keops_io
keops-main
pykeops/pykeops/__init__.py
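Typical use of the cache-management helpers defined above; the build-folder path is a placeholder.

import pykeops

pykeops.clean_pykeops()                        # wipe cached binaries (recompiles the JIT binary when CUDA is in use)
pykeops.set_build_folder("/tmp/keops_build")   # placeholder path
print(pykeops.get_build_folder())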
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 1000, 1000, 3, 1 dtype = torch.float32 device_id = "cpu" # "cuda" if torch.cuda.is_available() else "cpu" torch.backends.cuda.matmul.allow_tf32 = False torch.manual_seed(0) x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, requires_grad=True, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y) ** 2).sum(dim=2) Kxy = (-Dxy).exp() if backend == "keops": out = LazyTensor.__matmul__(Kxy, b, backend="CPU") else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out.flatten()[:10]) return out backends = ["keops", "torch"] # "keops_old" out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) out_g = [] for k, backend in enumerate(backends): out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [b], create_graph=True)[0]) out_g2 = [] for k, backend in enumerate(backends): out_g2.append(torch.autograd.grad((out_g[k] ** 2).sum(), [b])[0]) class TestClass: def test_lazytensor_gaussian_cpu(self): assert torch.allclose(out[0], out[1]) def test_lazytensor_gaussian_cpu_bw1(self): assert torch.allclose(out_g[0], out_g[1]) def test_lazytensor_gaussian_cpu_bw2(self): assert torch.allclose(out_g2[0], out_g2[1])
keops-main
pykeops/pykeops/test/test_lazytensor_gaussian_cpu.py
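A stripped-down version of the Gaussian kernel mat-vec exercised by the test above, without the gradient checks; shapes are arbitrary and it runs on CPU.

import math
import torch
from pykeops.torch import LazyTensor

M, N, D, DV = 100, 120, 3, 1
x = torch.rand(M, 1, D) / math.sqrt(D)
y = torch.rand(1, N, D) / math.sqrt(D)
b = torch.randn(N, DV)

Dxy = ((LazyTensor(x) - LazyTensor(y)) ** 2).sum(dim=2)
out = (-Dxy).exp() @ b        # reduction over j
print(out.shape)              # torch.Size([100, 1])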
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 2500, 2000, 3, 1 dtype = torch.float64 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = 0 if torch.cuda.is_available() else -1 torch.manual_seed(0) x = torch.rand(M, 1, D, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=2) Kxy = (-Dxy).exp() if "keops" in backend: out = Kxy.__matmul__(b, sum_scheme=sum_scheme, device_id=device_id) else: out = Kxy @ b # print("out:",out) return out backends = ["keops", "torch"] out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) def test_lazytensor_gaussian_fromhost(): assert torch.allclose(out[0], out[1])
keops-main
pykeops/pykeops/test/test_lazytensor_gaussian_fromhost.py
import math import torch from pykeops.torch import LazyTensor B1, B2, M, N, D, DV = 2, 3, 200, 300, 3, 300 dtype = torch.float32 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(B1, B2, M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(B1, 1, 1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(1, B2, N, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=4) Kxy = (-Dxy).exp() if "keops" in backend: out = Kxy.__matmul__(b, sum_scheme=sum_scheme) else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out[:,:10]) return out out = [] for backend in ["keops", "torch"]: out.append(fun(x, y, b, backend).squeeze()) def test_finalchunks_ranges(): assert torch.allclose(out[0], out[1], atol=0.0001)
keops-main
pykeops/pykeops/test/test_finalchunks_ranges.py
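The batch broadcasting used in this test (and in test_chunk_ranges below) can be checked with plain torch at toy sizes; the point is only the shape bookkeeping of (B1, B2, M, 1, D) against (B1, 1, 1, N, D) and (1, B2, N, DV).

import torch

B1, B2, M, N, D, DV = 2, 3, 5, 7, 3, 4
x = torch.rand(B1, B2, M, 1, D)
y = torch.rand(B1, 1, 1, N, D)
b = torch.randn(1, B2, N, DV)

K = (-((x - y) ** 2).sum(dim=4)).exp()   # (B1, B2, M, N)
out = K @ b                              # batch dims broadcast -> (B1, B2, M, DV)
print(out.shape)                         # torch.Size([2, 3, 5, 4])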
import unittest import itertools import numpy as np import pykeops import pykeops.config from pykeops.numpy.utils import ( squared_distances, np_kernel, log_np_kernel, grad_np_kernel, differences, log_sum_exp, ) class PytorchUnitTestCase(unittest.TestCase): A = int(5) # Batchdim 1 B = int(3) # Batchdim 2 M = int(10) N = int(6) D = int(3) E = int(3) nbatchdims = int(2) x = np.random.rand(M, D) a = np.random.rand(M, E) e = np.random.rand(M, E) f = np.random.rand(M, 1) y = np.random.rand(N, D) b = np.random.rand(N, E) g = np.random.rand(N, 1) p = np.random.rand(2) sigma = np.array([0.4]) alpha = np.array([0.1]) X = np.random.rand(A, B, M, D) L = np.random.rand(A, 1, M, 1) Y = np.random.rand(1, B, N, D) S = np.random.rand(A, B, 1) + 1 try: import torch use_cuda = torch.cuda.is_available() device = "cuda" if use_cuda else "cpu" torch.backends.cuda.matmul.allow_tf32 = False dtype = torch.float32 xc = torch.tensor(x, dtype=dtype, device=device, requires_grad=True) ac = torch.tensor(a, dtype=dtype, device=device, requires_grad=False) ec = torch.tensor(e, dtype=dtype, device=device, requires_grad=False) fc = torch.tensor(f, dtype=dtype, device=device, requires_grad=True) yc = torch.tensor(y, dtype=dtype, device=device, requires_grad=False) bc = torch.tensor(b, dtype=dtype, device=device, requires_grad=False) gc = torch.tensor(g, dtype=dtype, device=device, requires_grad=True) pc = torch.tensor(p, dtype=dtype, device=device, requires_grad=False) sigmac = torch.tensor(sigma, dtype=dtype, device=device, requires_grad=False) alphac = torch.tensor(alpha, dtype=dtype, device=device, requires_grad=False) Xc = torch.tensor(X, dtype=dtype, device=device, requires_grad=True) Lc = torch.tensor(L, dtype=dtype, device=device, requires_grad=False) Yc = torch.tensor(Y, dtype=dtype, device=device, requires_grad=True) Sc = torch.tensor(S, dtype=dtype, device=device, requires_grad=True) dtype = torch.float64 xcd = torch.tensor(x, dtype=dtype, device=device, requires_grad=False) acd = torch.tensor(a, dtype=dtype, device=device, requires_grad=False) ecd = torch.tensor(e, dtype=dtype, device=device, requires_grad=False) fcd = torch.tensor(f, dtype=dtype, device=device, requires_grad=False) ycd = torch.tensor(y, dtype=dtype, device=device, requires_grad=False) bcd = torch.tensor(b, dtype=dtype, device=device, requires_grad=False) gcd = torch.tensor(g, dtype=dtype, device=device, requires_grad=False) pcd = torch.tensor(p, dtype=dtype, device=device, requires_grad=False) sigmacd = torch.tensor(sigma, dtype=dtype, device=device, requires_grad=False) alphacd = torch.tensor(alpha, dtype=dtype, device=device, requires_grad=False) Xcd = torch.tensor(X, dtype=dtype, device=device, requires_grad=True) Lcd = torch.tensor(L, dtype=dtype, device=device, requires_grad=False) Ycd = torch.tensor(Y, dtype=dtype, device=device, requires_grad=True) Scd = torch.tensor(S, dtype=dtype, device=device, requires_grad=True) print("Running Pytorch tests.") except: print("Pytorch could not be loaded. 
Skip tests.") pass ############################################################ def test_generic_syntax_float(self): ############################################################ from pykeops.torch import Genred aliases = ["p=Pm(1)", "a=Vj(1)", "x=Vi(3)", "y=Vj(3)"] formula = "Square(p-a)*Exp(x+y)" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b in backend_to_test: with self.subTest(b=b): # Call cuda kernel gamma_keops = Genred(formula, aliases, axis=1)( self.sigmac, self.gc, self.xc, self.yc, backend=b ) # Numpy version gamma_py = np.sum( (self.sigma - self.g) ** 2 * np.exp((self.y.T[:, :, np.newaxis] + self.x.T[:, np.newaxis, :])), axis=1, ).T # compare output self.assertTrue( np.allclose(gamma_keops.cpu().data.numpy(), gamma_py, atol=1e-6) ) ############################################################ def test_generic_syntax_double(self): ############################################################ from pykeops.torch import Genred aliases = ["p=Pm(1)", "a=Vj(1)", "x=Vi(3)", "y=Vj(3)"] formula = "Square(p-a)*Exp(x+y)" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b in backend_to_test: with self.subTest(b=b): # Call cuda kernel gamma_keops = Genred(formula, aliases, axis=1)( self.sigmacd, self.gcd, self.xcd, self.ycd, backend=b ) # Numpy version gamma_py = np.sum( (self.sigma - self.g) ** 2 * np.exp((self.y.T[:, :, np.newaxis] + self.x.T[:, np.newaxis, :])), axis=1, ).T # compare output self.assertTrue( np.allclose(gamma_keops.cpu().data.numpy(), gamma_py, atol=1e-6) ) ############################################################ def test_generic_syntax_softmax(self): ############################################################ from pykeops.torch import Genred aliases = ["p=Pm(1)", "a=Vj(1)", "x=Vi(3)", "y=Vj(3)"] formula = "Square(p-a)*Exp(-SqNorm2(x-y))" formula_weights = "y" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b in backend_to_test: with self.subTest(b=b): # Call cuda kernel myop = Genred( formula, aliases, reduction_op="SumSoftMaxWeight", axis=1, formula2=formula_weights, ) gamma_keops = myop( self.sigmacd, self.gcd, self.xcd, self.ycd, backend=b ) # Numpy version def np_softmax(x, w): x -= np.max(x, axis=1)[:, None] # subtract the max for robustness return np.exp(x) @ w / np.sum(np.exp(x), axis=1)[:, None] gamma_py = np_softmax( (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)), self.y, ) # compare output self.assertTrue( np.allclose(gamma_keops.cpu().data.numpy(), gamma_py, atol=1e-6) ) ############################################################ def test_generic_syntax_simple(self): ############################################################ from pykeops.torch import Genred aliases = [ "P = Pm(2)", # 1st argument, a parameter, dim 2. "X = Vi(" + str(self.xc.shape[1]) + ") ", # 2nd argument, indexed by i, dim D. "Y = Vj(" + str(self.yc.shape[1]) + ") ", ] # 3rd argument, indexed by j, dim D. 
formula = "Pow((X|Y),2) * ((Elem(P,0) * X) + (Elem(P,1) * Y))" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b in backend_to_test: with self.subTest(b=b): my_routine = Genred(formula, aliases, reduction_op="Sum", axis=1) gamma_keops = my_routine(self.pc, self.xc, self.yc, backend=b) # Numpy version scals = (self.x @ self.y.T) ** 2 # Memory-intensive computation! gamma_py = self.p[0] * scals.sum(1).reshape(-1, 1) * self.x + self.p[ 1 ] * (scals @ self.y) # compare output self.assertTrue( np.allclose(gamma_keops.cpu().data.numpy(), gamma_py, atol=1e-6) ) ############################################################ def test_logSumExp_kernels_feature(self): ############################################################ from pykeops.torch import Vi, Vj, Pm kernels = { "gaussian": lambda xc, yc, sigmac: ( -Pm(1 / sigmac**2) * Vi(xc).sqdist(Vj(yc)) ), "laplacian": lambda xc, yc, sigmac: ( -(Pm(1 / sigmac**2) * Vi(xc).sqdist(Vj(yc))).sqrt() ), "cauchy": lambda xc, yc, sigmac: ( 1 + Pm(1 / sigmac**2) * Vi(xc).sqdist(Vj(yc)) ) .power(-1) .log(), "inverse_multiquadric": lambda xc, yc, sigmac: ( 1 + Pm(1 / sigmac**2) * Vi(xc).sqdist(Vj(yc)) ) .sqrt() .power(-1) .log(), } for k in ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]: with self.subTest(k=k): # Call cuda kernel gamma_lazy = kernels[k](self.xc, self.yc, self.sigmac) gamma_lazy = gamma_lazy.logsumexp(dim=1, weight=Vj(self.gc.exp())).cpu() # gamma = kernel_product(params, self.xc, self.yc, self.gc).cpu() # Numpy version log_K = log_np_kernel(self.x, self.y, self.sigma, kernel=k) log_KP = log_K + self.g.T gamma_py = log_sum_exp(log_KP, axis=1) # compare output self.assertTrue( np.allclose(gamma_lazy.data.numpy().ravel(), gamma_py, atol=1e-6) ) ############################################################ def test_logSumExp_gradient_kernels_feature(self): ############################################################ import torch from pykeops.torch import Genred aliases = [ "P = Pm(2)", # 1st argument, a parameter, dim 2. "X = Vi(" + str(self.gc.shape[1]) + ") ", # 2nd argument, indexed by i, dim D. "Y = Vj(" + str(self.fc.shape[1]) + ") ", ] # 3rd argument, indexed by j, dim D. formula = "(Elem(P,0) * X) + (Elem(P,1) * Y)" # Pytorch version my_routine = Genred(formula, aliases, reduction_op="LogSumExp", axis=1) tmp = my_routine(self.pc, self.fc, self.gc, backend="auto") res = torch.dot( torch.ones_like(tmp).view(-1), tmp.view(-1) ) # equivalent to tmp.sum() but avoiding contiguity pb gamma_keops = torch.autograd.grad(res, [self.fc, self.gc], create_graph=False) # Numpy version tmp = self.p[0] * self.f + self.p[1] * self.g.T res_py = (np.exp(tmp)).sum(axis=1) tmp2 = np.exp(tmp.T) / res_py.reshape(1, -1) gamma_py = [np.ones(self.M) * self.p[0], self.p[1] * tmp2.T.sum(axis=0)] # compare output self.assertTrue( np.allclose( gamma_keops[0].cpu().data.numpy().ravel(), gamma_py[0], atol=1e-6 ) ) self.assertTrue( np.allclose( gamma_keops[1].cpu().data.numpy().ravel(), gamma_py[1], atol=1e-6 ) ) ############################################################ def test_non_contiguity(self): ############################################################ from pykeops.torch import Genred aliases = [ "P = Pm(2)", # 1st argument, a parameter, dim 2. "X = Vi(" + str(self.xc.shape[1]) + ") ", # 2nd argument, indexed by i, dim D. "Y = Vj(" + str(self.yc.shape[1]) + ") ", ] # 3rd argument, indexed by j, dim D. 
formula = "Pow((X|Y),2) * ((Elem(P,0) * X) + (Elem(P,1) * Y))" my_routine = Genred(formula, aliases, reduction_op="Sum", axis=1) yc_tmp = self.yc.t().contiguous().t() # create a non contiguous copy # check output self.assertFalse(yc_tmp.is_contiguous()) my_routine(self.pc, self.xc, yc_tmp, backend="auto") ############################################################ def test_heterogeneous_var_aliases(self): ############################################################ from pykeops.torch import Genred from pykeops.numpy.utils import squared_distances aliases = ["p=Pm(0,1)", "x=Vi(1,3)", "y=Vj(2,3)"] formula = "Square(p-Var(3,1,1))*Exp(-SqNorm2(y-x))" # Call cuda kernel myconv = Genred(formula, aliases, reduction_op="Sum", axis=1) gamma_keops = myconv(self.sigmac, self.xc, self.yc, self.gc, backend="auto") # Numpy version gamma_py = np.sum( (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)), axis=1, ) # compare output self.assertTrue( np.allclose( gamma_keops.cpu().data.numpy().ravel(), gamma_py.ravel(), atol=1e-6 ) ) ############################################################ def test_invkernel(self): ############################################################ import torch from pykeops.torch.operations import KernelSolve formula = "Exp(-oos2*SqDist(x,y))*b" aliases = [ "x = Vi(" + str(self.D) + ")", # First arg : i-variable, of size D "y = Vj(" + str(self.D) + ")", # Second arg : j-variable, of size D "b = Vj(" + str(self.E) + ")", # Third arg : j-variable, of size Dv "oos2 = Pm(1)", ] # Fourth arg : scalar parameter Kinv = KernelSolve(formula, aliases, "b", axis=1) c = Kinv(self.xc, self.xc, self.ac, self.sigmac, alpha=self.alphac) if torch.__version__ >= "1.8": torchsolve = lambda A, B: torch.linalg.solve(A, B) else: torchsolve = lambda A, B: torch.solve(B, A)[0] c_ = torchsolve( self.alphac * torch.eye(self.M, device=self.device) + torch.exp( -torch.sum((self.xc[:, None, :] - self.xc[None, :, :]) ** 2, dim=2) * self.sigmac ), self.ac, ) self.assertTrue( np.allclose( c.cpu().data.numpy().ravel(), c_.cpu().data.numpy().ravel(), atol=1e-4 ) ) (u,) = torch.autograd.grad(c, self.xc, self.ec) (u_,) = torch.autograd.grad(c_, self.xc, self.ec) self.assertTrue( np.allclose( u.cpu().data.numpy().ravel(), u_.cpu().data.numpy().ravel(), atol=1e-4 ) ) ############################################################ def test_softmax(self): ############################################################ import torch from pykeops.torch import Genred formula = "SqDist(x,y)" formula_weights = "b" aliases = [ "x = Vi(" + str(self.D) + ")", # First arg : i-variable, of size D "y = Vj(" + str(self.D) + ")", # Second arg : j-variable, of size D "b = Vj(" + str(self.E) + ")", ] # third arg : j-variable, of size Dv softmax_op = Genred( formula, aliases, reduction_op="SumSoftMaxWeight", axis=1, formula2=formula_weights, ) c = softmax_op(self.xc, self.yc, self.bc) # compare with direct implementation cc = 0 for k in range(self.D): xk = self.xc[:, k][:, None] yk = self.yc[:, k][:, None] cc += (xk - yk.t()) ** 2 cc -= torch.max(cc, dim=1)[0][:, None] # subtract the max for robustness cc = torch.exp(cc) @ self.bc / torch.sum(torch.exp(cc), dim=1)[:, None] self.assertTrue( np.allclose( c.cpu().data.numpy().ravel(), cc.cpu().data.numpy().ravel(), atol=1e-6 ) ) ############################################################ def test_pickle(self): ############################################################ from pykeops.torch import Genred import pickle formula = "SqDist(x,y)" aliases = [ "x = Vi(" + 
str(self.D) + ")", # First arg : i-variable, of size D "y = Vj(" + str(self.D) + ")", # Second arg : j-variable, of size D ] kernel_instance = Genred(formula, aliases, reduction_op="Sum", axis=1) # serialize/pickle serialized_kernel = pickle.dumps(kernel_instance) # deserialize/unpickle deserialized_kernel = pickle.loads(serialized_kernel) self.assertTrue(type(kernel_instance), type(deserialized_kernel)) ############################################################ def test_LazyTensor_sum(self): ############################################################ import torch from pykeops.torch import LazyTensor full_results = [] for use_keops in [True, False]: results = [] # N.B.: We could loop over float32 and float64, but this would take longer... for (x, l, y, s) in [(self.Xc, self.Lc, self.Yc, self.Sc)]: # Float32 x_i = x.unsqueeze(-2) l_i = l.unsqueeze(-2) y_j = y.unsqueeze(-3) s_p = s.unsqueeze(-2).unsqueeze(-2) if use_keops: x_i, l_i, y_j, s_p = ( LazyTensor(x_i), LazyTensor(l_i), LazyTensor(y_j), LazyTensor(s_p), ) D_ij = (0.5 * (l_i * x_i - y_j) ** 2 / s_p).sum(-1) K_ij = (-D_ij).exp() a_i = K_ij.sum(self.nbatchdims + 1) if use_keops: a_i = a_i.squeeze(-1) [g_x, g_y, g_s] = torch.autograd.grad( (a_i**2).sum(), [x, y, s], create_graph=True ) [g_xx] = torch.autograd.grad((g_x**2).sum(), [x], create_graph=True) results += [a_i, g_x, g_y, g_s, g_xx] full_results.append(results) for (res_keops, res_torch) in zip(full_results[0], full_results[1]): self.assertTrue(res_keops.shape == res_torch.shape) self.assertTrue( np.allclose( res_keops.cpu().data.numpy().ravel(), res_torch.cpu().data.numpy().ravel(), atol=1e-3, ), "KeOps:\n" + str(res_keops) + "\nPyTorch:\n" + str(res_torch) + "\nMax error: {:.2e}".format((res_keops - res_torch).abs().max()), ) ############################################################ def test_LazyTensor_logsumexp(self): ############################################################ import torch from pykeops.torch import LazyTensor full_results = [] for use_keops in [True, False]: results = [] # N.B.: We could loop over float32 and float64, but this would take longer... for (x, l, y, s) in [(self.Xcd, self.Lcd, self.Ycd, self.Scd)]: # Float64 x_i = x.unsqueeze(-2) l_i = l.unsqueeze(-2) y_j = y.unsqueeze(-3) s_p = s.unsqueeze(-2).unsqueeze(-2) if use_keops: x_i, l_i, y_j, s_p = ( LazyTensor(x_i), LazyTensor(l_i), LazyTensor(y_j), LazyTensor(s_p), ) D_ij = ((l_i * x_i + y_j).relu() * s_p / 9).sum(-1) K_ij = -1 / (1 + D_ij) a_i = K_ij.logsumexp(self.nbatchdims + 1) if use_keops: a_i = a_i.squeeze(-1) [g_x, g_y, g_s] = torch.autograd.grad( (1.0 * a_i).sum(), [x, y, s], create_graph=True ) # N.B. (Joan, sept 2020) commenting out the 2nd order gradient computation here, # since it slows down too much the compilation currently, when using Cuda 11. 
# [g_xs] = torch.autograd.grad((g_x.abs()).sum(), [s], create_graph=True) results += [a_i, g_x, g_y, g_s] full_results.append(results) for (res_keops, res_torch) in zip(full_results[0], full_results[1]): self.assertTrue(res_keops.shape == res_torch.shape) self.assertTrue( np.allclose( res_keops.cpu().data.numpy().ravel(), res_torch.cpu().data.numpy().ravel(), atol=1e-5, ) ) ############################################################ # Test min reduction with chunk without batches def test_LazyTensor_min_chunked(self): ############################################################ from pykeops.torch import LazyTensor import torch X = np.random.rand(self.M, 990) Xc = torch.tensor(X, dtype=self.dtype, device=self.device) Y = np.random.rand(self.N, 990) Yc = torch.tensor(Y, dtype=self.dtype, device=self.device) full_results = [] for use_keops in [True, False]: results = [] # N.B.: We could loop over float32 and float64, but this would take longer... for (x, y) in [(Xc, Yc)]: # Float32 x_i = x.unsqueeze(-2) y_j = y.unsqueeze(-3) if use_keops: x_i, y_j = ( LazyTensor(x_i), LazyTensor(y_j), ) K_ij = ((-(((x_i + y_j)) ** 2)).exp()).sum(-1, keepdim=True) if use_keops: m, am = K_ij.min_argmin(dim=0) else: m, am = K_ij.min(dim=0) results += [m, am] full_results.append(results) for (res_keops, res_torch) in zip(full_results[0], full_results[1]): self.assertTrue(res_keops.shape == res_torch.shape) self.assertTrue( np.allclose( res_keops.cpu().data.numpy().ravel(), res_torch.cpu().data.numpy().ravel(), atol=1e-5, ) ) ############################################################ def test_LazyTensor_min(self): ############################################################ from pykeops.torch import LazyTensor full_results = [] for use_keops in [True, False]: results = [] # N.B.: We could loop over float32 and float64, but this would take longer...
for (x, l, y, s) in [(self.Xc, self.Lc, self.Yc, self.Sc)]: # Float32 x_i = x.unsqueeze(-2) l_i = l.unsqueeze(-2) y_j = y.unsqueeze(-3) s_p = s.unsqueeze(-2).unsqueeze(-2) if use_keops: x_i, l_i, y_j, s_p = ( LazyTensor(x_i), LazyTensor(l_i), LazyTensor(y_j), LazyTensor(s_p), ) D_ij = ((1 + ((l_i * x_i + y_j).relu() * s_p) ** 2).log()).sum( -1, keepdim=True ) K_ij = (D_ij**1.5 + 1).cos() * (D_ij * (3.2 + s_p)).sin() if use_keops: m, am = K_ij.min_argmin(dim=self.nbatchdims) else: m, am = K_ij.min(dim=self.nbatchdims) results += [m, am] full_results.append(results) for (res_keops, res_torch) in zip(full_results[0], full_results[1]): self.assertTrue(res_keops.shape == res_torch.shape) self.assertTrue( np.allclose( res_keops.cpu().data.numpy().ravel(), res_torch.cpu().data.numpy().ravel(), atol=1e-5, ) ) ############################################################ def test_TensorDot_with_permute(self): ############################################################ import torch from pykeops.torch import LazyTensor def my_tensordort_perm(a, b, dims=None, perm=None): return torch.tensordot(a, b, dims=dims).sum(3).permute(perm) def invert_permutation_numpy(permutation): return np.arange(len(permutation))[np.argsort(permutation)] x = torch.randn(self.M, 2, 3, 2, 2, 4, requires_grad=True, dtype=torch.float64) y = torch.randn( self.N, 2, 4, 2, 3, 2, 3, requires_grad=True, dtype=torch.float64 ) dimfa, dimfb = x.shape[1:], y.shape[1:] contfa, contfb = [5, 1, 3], [2, 5, 3] perm = [4, 3, 2, 0, 1] perm_torch = (0,) + tuple([(i + 1) for i in invert_permutation_numpy(perm)]) sum_f_torch2 = my_tensordort_perm(x, y, dims=(contfa, contfb), perm=perm_torch) f_keops = LazyTensor( x.reshape(self.M, 1, int(np.array((dimfa)).prod())) ).keops_tensordot( LazyTensor(y.reshape(1, self.N, int(np.array(dimfb).prod()))), dimfa, dimfb, tuple(np.array(contfa) - 1), tuple(np.array(contfb) - 1), tuple(perm), ) sum_f_keops = f_keops.sum_reduction(dim=1) self.assertTrue(torch.allclose(sum_f_keops.flatten(), sum_f_torch2.flatten())) e = torch.randn_like(sum_f_torch2) # checking gradients grad_keops = torch.autograd.grad( sum_f_keops, x, e.reshape(self.M, -1), retain_graph=True )[0] grad_torch = torch.autograd.grad(sum_f_torch2, x, e, retain_graph=True)[0] self.assertTrue( torch.allclose(grad_keops.flatten(), grad_torch.flatten(), rtol=1e-4) ) grad_keops = torch.autograd.grad(sum_f_keops, y, e.reshape(self.M, -1))[0] grad_torch = torch.autograd.grad(sum_f_torch2, y, e)[0] self.assertTrue( torch.allclose(grad_keops.flatten(), grad_torch.flatten(), rtol=1e-4) ) if __name__ == "__main__": """ run tests """ unittest.main()
keops-main
pykeops/pykeops/test/test_torch.py
import math import torch from pykeops.torch import LazyTensor B1, B2, M, N, D, DV = 2, 3, 200, 300, 300, 1 dtype = torch.float32 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(B1, B2, M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(B1, 1, 1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(1, B2, N, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=4) Kxy = (-Dxy).exp() if "keops" in backend: out = Kxy.__matmul__(b, sum_scheme=sum_scheme) else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out) return out out = [] for backend in ["keops", "torch"]: out.append(fun(x, y, b, backend).squeeze()) def test_chunk_ranges(): assert torch.allclose(out[0], out[1], rtol=0.0001, atol=0.0001)
keops-main
pykeops/pykeops/test/test_chunks_ranges.py
keops-main
pykeops/pykeops/test/__init__.py
import torch from pykeops.torch import LazyTensor M, N, D = 1000, 1000, 3 torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.randn(M, 1, D, requires_grad=True, device=device_id) y = torch.randn(1, N, D, device=device_id) a = -1.23 b = 1.54 def fun(x, y, a, b, backend): if backend == "keops": x = LazyTensor(x) y = LazyTensor(y) elif backend != "torch": raise ValueError("wrong backend") Dxy = ((x * y).clamp(a, b)).sum(dim=2) Kxy = (-(Dxy**2)).exp() return Kxy.sum(dim=1) out = [] for backend in ["torch", "keops"]: out.append(fun(x, y, a, b, backend).squeeze()) out_g = [] for k, backend in enumerate(["torch", "keops"]): out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [x])[0]) class TestCase: def test_lazytensor_clamp_fw(self): assert torch.allclose(out[0], out[1]) def test_lazytensor_clamp_bw(self): assert torch.allclose(out_g[0], out_g[1], atol=0.01)
keops-main
pykeops/pykeops/test/test_lazytensor_clamp.py
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 200, 300, 300, 1 dtype = torch.float32 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=2) Kxy = (-Dxy).exp() if "keops" in backend: out = Kxy.__matmul__(b, sum_scheme=sum_scheme) else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out) return out out = [] for backend in ["keops", "torch"]: out.append(fun(x, y, b, backend).squeeze()) def test_chunks(): assert torch.allclose(out[0], out[1], atol=0.0001)
keops-main
pykeops/pykeops/test/test_chunks.py
from pykeops.numpy import LazyTensor import numpy as np np.random.seed(0) a1 = np.random.rand(2, 1000, 5) a2 = np.ascontiguousarray(a1.transpose(2, 0, 1)).transpose(1, 2, 0) b = np.random.rand(2, 1000, 5) c = np.random.rand(2, 1000, 5) b_j = LazyTensor(b[:, None]) a1_i = LazyTensor(a1[:, :, None]) dist1 = a1_i.sqdist(b_j) kernel1 = dist1.exp() d1 = kernel1 @ c a2_i = LazyTensor(a2[:, :, None]) dist2 = a2_i.sqdist(b_j) kernel2 = dist2.exp() d2 = kernel2 @ c def test_contiguous_numpy(): assert np.allclose(d2, d1)
keops-main
pykeops/pykeops/test/test_contiguous_numpy.py
import math import numpy as np from pykeops.numpy import LazyTensor M, N, D, DV = 3000, 2000, 3, 1 dtype = np.float32 np.random.seed(0) x = np.random.rand(M, 1, D).astype(dtype) / math.sqrt(D) y = np.random.rand(1, N, D).astype(dtype) / math.sqrt(D) b = np.random.randn(N, DV).astype(dtype) a = np.empty((M, DV), dtype=dtype) def fun(x, y, b, backend, out=None): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y)).sum(axis=2) if backend == "keops": Kxy = (-Dxy).exp() out = Kxy.__matmul__(b, out=out) else: Kxy = np.exp(-Dxy) out = Kxy @ b return out backends = ["keops", "numpy"] out = [] for backend in backends: out.append(fun(x, y, b, backend, out=a).squeeze()) def test_lazytensor_gaussian_numpy_inplace(): assert np.allclose(out[0], out[1])
keops-main
pykeops/pykeops/test/test_lazytensor_gaussian_numpy_inplace.py
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 200, 300, 3, 300 dtype = torch.float32 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=2) Kxy = (-Dxy).exp() if "keops" in backend: out = Kxy.__matmul__(b, sum_scheme=sum_scheme) else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() return out backends = ["keops", "torch"] out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) def test_finalchunk(): # print(out[0] - out[1]) assert torch.allclose(out[0], out[1], atol=0.0001)
keops-main
pykeops/pykeops/test/test_finalchunks.py
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 2000, 3000, 3, 1 dtype = torch.float32 torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, requires_grad=True, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y) ** 2).sum(dim=2) Kxy = (-Dxy).exp() if backend == "keops2D": out = LazyTensor.__matmul__(Kxy, b, backend="GPU_2D") else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out) return out backends = ["keops2D", "torch"] out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) out_g = [] for k, backend in enumerate(backends): out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [b], create_graph=True)[0]) out_g2 = [] for k, backend in enumerate(backends): out_g2.append(torch.autograd.grad((out_g[k] ** 2).sum(), [b])[0]) class TestCase: def test_conv2d_fw(self): assert torch.allclose(out[0], out[1]) def test_conv2d_bw1(self): assert torch.allclose(out_g[0], out_g[1]) def test_conv2d_bw2(self): assert torch.allclose(out_g2[0], out_g2[1])
keops-main
pykeops/pykeops/test/conv2d.py
import torch from pykeops.torch import LazyTensor M, N = 2, 10 # Matrix multiplication as a special case of Tensordot torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) a = torch.randn(4 * 7, requires_grad=True, device=device_id, dtype=torch.float64) b = torch.randn(7, requires_grad=True, device=device_id, dtype=torch.float64) c = a.reshape(4, 7) @ b A = LazyTensor(a[None, None, :]) B = LazyTensor(b[None, None, :]) C = A.keops_tensordot(B, (4, 7), (7,), (1,), (0,)).sum_reduction(dim=1) def test_tensordot(): assert torch.allclose(c.flatten(), C.flatten())
keops-main
pykeops/pykeops/test/test_lazytensor_tensordot.py
# Test for the float16 data type using LazyTensors

import pytest
import torch

from pykeops.torch import LazyTensor

dtype = torch.float16

M, N, D = 5, 5, 1

torch.backends.cuda.matmul.allow_tf32 = False

device_id = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(0)
x = torch.randn(M, 1, D, dtype=dtype, requires_grad=True, device=device_id)
y = torch.randn(1, N, D, dtype=dtype, device=device_id)


def fun(x, y, backend):
    if backend == "keops":
        x = LazyTensor(x)
        y = LazyTensor(y)
    elif backend != "torch":
        raise ValueError("wrong backend")
    Dxy = (x - y).sum(dim=2)
    Kxy = Dxy
    return Kxy.sum(dim=0)


class TestCase:
    out = []

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires a GPU")
    def test_float16_fw(self):
        for backend in ["torch", "keops"]:
            self.out.append(fun(x, y, backend).squeeze())
        assert torch.allclose(self.out[0], self.out[1], atol=0.001, rtol=0.001)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires a GPU")
    def test_float16_bw(self):
        out_g = []
        for k, backend in enumerate(["torch", "keops"]):
            out_g.append(torch.autograd.grad(self.out[k][0], [x])[0])
        assert torch.allclose(out_g[0], out_g[1])
keops-main
pykeops/pykeops/test/test_float16.py
import math

import pytest
import torch

from pykeops.torch import LazyTensor

M, N, D, DV = 1000, 1000, 3, 1

dtype = torch.float32
device_id = "cpu"

torch.backends.cuda.matmul.allow_tf32 = False

torch.manual_seed(0)
x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D)
y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D)
b = torch.randn(N, DV, device=device_id, dtype=dtype)


def fun(x, y, b, backend):
    if "keops" in backend:
        x = LazyTensor(x)
        y = LazyTensor(y)
    Dxy = ((x - y) ** 2).sum(dim=2)
    Kxy = (-Dxy).exp()
    if "keops" in backend:
        if backend.split("_")[1] == "gpu":
            out = Kxy.__matmul__(b, backend="GPU_1D")
        elif backend.split("_")[1] == "cpu":
            out = Kxy.__matmul__(b, backend="CPU")
    else:
        out = Kxy @ b
    return out


out = []
for backend in ["torch", "keops_cpu"]:
    out.append(fun(x, y, b, backend).squeeze())


class TestCase:
    def test_torch_keops_cpu(self):
        assert torch.allclose(out[0], out[1])

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires a GPU")
    def test_torch_keops_gpu(self):
        assert torch.allclose(out[0], fun(x, y, b, "keops_gpu").squeeze())
keops-main
pykeops/pykeops/test/test_gpu_cpu.py
import numpy as np from pykeops.numpy import LazyTensor, ComplexLazyTensor M, N, D = 1000, 1000, 3 dtype = "float32" np.random.seed(0) x = np.random.rand(M, 1, D).astype(dtype) + 1j * np.random.rand(M, 1, D).astype(dtype) y = np.random.rand(1, N, D).astype(dtype) + 1j * np.random.rand(1, N, D).astype(dtype) def fun(x, y, backend): if backend == "keops": x = LazyTensor(x) y = LazyTensor(y) Kxy = ((x * y) * y.real + x + x.real).sum(axis=2) return Kxy.sum(axis=0) out = [] for backend in ["numpy", "keops"]: out.append(fun(x, y, backend).squeeze()) def test_complex_numpy(): assert np.allclose(out[0], out[1])
keops-main
pykeops/pykeops/test/test_complex_numpy.py
from pykeops.torch import LazyTensor import torch torch.backends.cuda.matmul.allow_tf32 = False torch.manual_seed(0) a1 = torch.rand(2, 1000, 5) a2 = ((a1.permute(2, 0, 1)).contiguous()).permute(1, 2, 0) b = torch.rand(2, 1000, 5) c = torch.rand(2, 1000, 5) b_j = LazyTensor(b[:, None]) a1_i = LazyTensor(a1[:, :, None]) dist1 = a1_i.sqdist(b_j) kernel1 = dist1.exp() d1 = kernel1 @ c a2_i = LazyTensor(a2[:, :, None]) dist2 = a2_i.sqdist(b_j) kernel2 = dist2.exp() d2 = kernel2 @ c def test_contiguous_torch(): assert torch.allclose(d2, d1)
keops-main
pykeops/pykeops/test/test_contiguous_torch.py
import torch from pykeops.torch import LazyTensor M, N, D, DV = 100, 100, 3, 1 dtype = torch.float32 torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(M, 1, D, requires_grad=True, device=device_id, dtype=dtype) y = torch.rand(1, N, 1, device=device_id, dtype=dtype) b = torch.randn(N, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) # Kxy = ((x - 0.5).mod(1, 0.2) - y).sum(dim=2) Kxy = (x.cos() - y).sum(dim=2) out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out.flatten()[:10]) return out backends = ["keops", "torch"] # "keops_old" out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) out_g = [] for k, backend in enumerate(backends): out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [x], create_graph=True)[0]) out_g2 = [] for k, backend in enumerate(backends): out_g2.append(torch.autograd.grad((out_g[k] ** 2).sum(), [x])[0]) class TestClass: def test_lazytensor_grad(self): assert torch.allclose(out[0], out[1], rtol=0.0001) def test_lazytensor_grad_bw1(self): assert torch.allclose(out_g[0], out_g[1], rtol=0.0001) def test_lazytensor_grad_bw2(self): assert torch.allclose(out_g2[0], out_g2[1], rtol=0.001)
keops-main
pykeops/pykeops/test/test_lazytensor_grad.py
# Non-Uniform Discrete Fourier Tranform example import math import torch from pykeops.torch import LazyTensor dtype = torch.float32 dtype_c = torch.complex64 M, N, D = 1000, 1000, 1 torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(1, N, D, dtype=dtype_c, requires_grad=True, device=device_id) p = torch.rand(1, N, D, dtype=dtype, device=device_id) f = torch.rand(M, 1, D, dtype=dtype, device=device_id) def view_as_real(x): if torch.is_complex(x): return torch.view_as_real(x) else: return x def fun(x, p, f, backend): if "keops" in backend: x = LazyTensor(x) p = LazyTensor(p) f = LazyTensor(f) X = x * (-2 * math.pi * 1j * p * f).exp() return X.sum(dim=0) out = [] for backend in ["keops", "torch"]: out.append(fun(x, p, f, backend).squeeze()) def test_complex_fw(): assert torch.allclose(out[0], out[1]) # out_g = [] # for k, backend in enumerate(["keops", "torch"]): # if out[k].is_complex(): # out_g.append( # torch.autograd.grad((out[k].real ** 2 + out[k].imag ** 2).sum(), [x])[0] # ) # else: # out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [x])[0]) # # # def test_complex_bw(): # assert torch.allclose(out_g[0], out_g[1])
keops-main
pykeops/pykeops/test/test_complex.py
import os.path import sys sys.path.append( os.path.join( os.path.dirname(os.path.abspath(__file__)), os.path.sep.join([os.pardir] * 2) ) ) sys.path.append( os.path.join( os.path.dirname(os.path.abspath(__file__)), os.path.sep.join([os.pardir] * 3), "keopscore", ) ) import unittest import itertools import numpy as np import pykeops import pykeops.config from pykeops.numpy.utils import ( np_kernel, grad_np_kernel, differences, squared_distances, log_sum_exp, np_kernel_sphere, ) class NumpyUnitTestCase(unittest.TestCase): A = int(4) # Batchdim 1 B = int(6) # Batchdim 2 M = int(10) N = int(6) D = int(3) E = int(3) nbatchdims = int(2) x = np.random.rand(M, D) a = np.random.rand(M, E) f = np.random.rand(M, 1) y = np.random.rand(N, D) b = np.random.rand(N, E) g = np.random.rand(N, 1) sigma = np.array([0.4]) X = np.random.rand(A, 1, M, D) L = np.random.rand(1, B, M, 1) Y = np.random.rand(1, B, N, D) S = np.random.rand(A, B, 1) + 1 type_to_test = ["float32", "float64"] ############################################################ def test_generic_syntax_sum(self): ############################################################ from pykeops.numpy import Genred aliases = ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"] formula = "Square(p-a)*Exp(x+y)" axis = 1 # 0 means summation over i, 1 means over j if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b, t in itertools.product(backend_to_test, self.type_to_test): with self.subTest(b=b, t=t): # Call cuda kernel myconv = Genred(formula, aliases, reduction_op="Sum", axis=axis) gamma_keops = myconv( self.sigma.astype(t), self.g.astype(t), self.x.astype(t), self.y.astype(t), backend=b, ) # Numpy version gamma_py = np.sum( (self.sigma - self.g) ** 2 * np.exp((self.y.T[:, :, np.newaxis] + self.x.T[:, np.newaxis, :])), axis=1, ).T # compare output self.assertTrue(np.allclose(gamma_keops, gamma_py, atol=1e-6)) ############################################################ def test_generic_syntax_lse(self): ############################################################ from pykeops.numpy import Genred aliases = ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"] formula = "Square(p-a)*Exp(-SqNorm2(x-y))" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b, t in itertools.product(backend_to_test, self.type_to_test): with self.subTest(b=b, t=t): # Call cuda kernel myconv = Genred(formula, aliases, reduction_op="LogSumExp", axis=1) gamma_keops = myconv( self.sigma.astype(t), self.g.astype(t), self.x.astype(t), self.y.astype(t), backend=b, ) # Numpy version gamma_py = log_sum_exp( (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)), axis=1, ) # compare output self.assertTrue(np.allclose(gamma_keops.ravel(), gamma_py, atol=1e-6)) ############################################################ def test_generic_syntax_softmax(self): ############################################################ from pykeops.numpy import Genred aliases = ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"] formula = "Square(p-a)*Exp(-SqNorm2(x-y))" formula_weights = "y" if pykeops.config.gpu_available: backend_to_test = ["auto", "GPU_1D", "GPU_2D", "GPU"] else: backend_to_test = ["auto"] for b, t in itertools.product(backend_to_test, self.type_to_test): with self.subTest(b=b, t=t): # Call cuda kernel myop = Genred( formula, aliases, reduction_op="SumSoftMaxWeight", axis=1, formula2=formula_weights, ) gamma_keops = myop( 
self.sigma.astype(t), self.g.astype(t), self.x.astype(t), self.y.astype(t), backend=b, ) # Numpy version def np_softmax(x, w): x -= np.max(x, axis=1)[:, None] # subtract the max for robustness return np.exp(x) @ w / np.sum(np.exp(x), axis=1)[:, None] gamma_py = np_softmax( (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)), self.y, ) # compare output self.assertTrue( np.allclose(gamma_keops.ravel(), gamma_py.ravel(), atol=1e-6) ) ############################################################ def test_non_contiguity(self): ############################################################ from pykeops.numpy import Genred t = self.type_to_test[0] aliases = ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"] formula = "Square(p-a)*Exp(-SqNorm2(y-x))" my_routine = Genred(formula, aliases, reduction_op="Sum", axis=1) gamma_keops1 = my_routine( self.sigma.astype(t), self.g.astype(t), self.x.astype(t), self.y.astype(t), backend="auto", ) yc_tmp = np.ascontiguousarray(self.y.T).T # create a non contiguous copy gamma_keops2 = my_routine( self.sigma.astype(t), self.g.astype(t), self.x.astype(t), yc_tmp.astype(t) ) # check output self.assertFalse(yc_tmp.flags.c_contiguous) self.assertTrue(np.allclose(gamma_keops1, gamma_keops2)) ############################################################ def test_heterogeneous_var_aliases(self): ############################################################ from pykeops.numpy import Genred t = self.type_to_test[0] aliases = ["p=Pm(0,1)", "x=Vi(1,3)", "y=Vj(2,3)"] formula = "Square(p-Var(3,1,1))*Exp(-SqNorm2(y-x))" # Call cuda kernel myconv = Genred(formula, aliases, reduction_op="Sum", axis=1) gamma_keops = myconv( self.sigma.astype(t), self.x.astype(t), self.y.astype(t), self.g.astype(t), backend="auto", ) # Numpy version gamma_py = np.sum( (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)), axis=1, ) # compare output self.assertTrue(np.allclose(gamma_keops.ravel(), gamma_py, atol=1e-6)) ############################################################ def test_formula_simplification(self): ############################################################ from pykeops.numpy import Genred t = self.type_to_test[0] aliases = ["x=Vi(0,3)"] formula = "Grad(Grad(x + Var(1,3,1), x, Var(2,3,0)),x, Var(3,3,0))" # Call cuda kernel myconv = Genred(formula, aliases, reduction_op="Sum", axis=1) gamma_keops = myconv( self.x.astype(t), self.y.astype(t), self.x.astype(t), self.x.astype(t), backend="auto", ) # Numpy version gamma_py = np.zeros_like(self.x) # compare output self.assertTrue(np.allclose(gamma_keops, gamma_py, atol=1e-6)) ############################################################ def test_argkmin(self): ############################################################ from pykeops.numpy import Genred formula = "SqDist(x,y)" variables = [ "x = Vi(" + str(self.D) + ")", # First arg : i-variable, of size D "y = Vj(" + str(self.D) + ")", ] # Second arg : j-variable, of size D my_routine = Genred( formula, variables, reduction_op="ArgKMin", axis=1, opt_arg=3, ) c = my_routine(self.x, self.y, backend="auto").astype(int) cnp = np.argsort( np.sum((self.x[:, np.newaxis, :] - self.y[np.newaxis, :, :]) ** 2, axis=2), axis=1, )[:, :3] self.assertTrue(np.allclose(c.ravel(), cnp.ravel())) ############################################################ def test_LazyTensor_sum(self): ############################################################ from pykeops.numpy import LazyTensor full_results = [] for use_keops in [True, False]: results = [] for (x, l, y, s) in [ 
(self.X.astype(t), self.L.astype(t), self.Y.astype(t), self.S.astype(t)) for t in self.type_to_test ]: x_i = x[:, :, :, None, :] l_i = l[:, :, :, None, :] y_j = y[:, :, None, :, :] s_p = s[:, :, None, None, :] if use_keops: x_i, l_i, y_j, s_p = ( LazyTensor(x_i), LazyTensor(l_i), LazyTensor(y_j), LazyTensor(s_p), ) D_ij = ((l_i + x_i * y_j) ** 2 + s_p).sum(-1) if use_keops: K_ij = 1 / (1 + D_ij).exp() else: K_ij = 1 / np.exp(1 + D_ij) a_i = K_ij.sum(self.nbatchdims + 1) if use_keops: a_i = a_i.squeeze(-1) results += [a_i] full_results.append(results) for (res_keops, res_numpy) in zip(full_results[0], full_results[1]): self.assertTrue(res_keops.shape == res_numpy.shape) self.assertTrue(np.allclose(res_keops, res_numpy, atol=1e-3)) if __name__ == "__main__": unittest.main()
keops-main
pykeops/pykeops/test/test_numpy.py
import math import torch from pykeops.torch import LazyTensor B1, B2, M, N, D, DV = 3, 4, 20, 25, 3, 2 torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda" if torch.cuda.is_available() else "cpu" torch.manual_seed(1) x = torch.rand(1, B2, M, 1, D, device=device_id) / math.sqrt(D) y = torch.rand(B1, B2, 1, N, D, device=device_id) / math.sqrt(D) b = torch.randn(B1, 1, N, DV, requires_grad=True, device=device_id) def fun(x, y, b, backend): if backend == "keops": x = LazyTensor(x) y = LazyTensor(y) elif backend != "torch": raise ValueError("wrong backend") Dxy = ((x - y) ** 2).sum(dim=4) Kxy = (-Dxy).exp() out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() return out backends = ["torch", "keops"] out = [] for backend in backends: out.append(fun(x, y, b, backend).squeeze()) out_g = [] for k, backend in enumerate(backends): out_g.append(torch.autograd.grad((out[k] ** 2).sum(), [b])[0]) class TestCase: def test_lazytensor_gaussian_batch_fw(self): # print(out[0]- out[1]) assert torch.allclose(out[0], out[1], atol=1e-6) def test_lazytensor_gaussian_batch_bw(self): assert torch.allclose(out_g[0], out_g[1])
keops-main
pykeops/pykeops/test/test_lazytensor_gaussian_batch.py
import math import torch from pykeops.torch import LazyTensor M, N, D, DV = 20, 30, 3, 1 dtype = torch.float32 sum_scheme = "block_sum" torch.backends.cuda.matmul.allow_tf32 = False device_id = "cuda:0" if torch.cuda.is_available() else "cpu" torch.manual_seed(0) x = torch.rand(M, 1, D, device=device_id, dtype=dtype) / math.sqrt(D) y = torch.rand(1, N, D, device=device_id, dtype=dtype) / math.sqrt(D) b = torch.randn(N, DV, device=device_id, dtype=dtype) a = torch.empty(M, DV, device=device_id, dtype=dtype) def fun(x, y, b, backend, out=None): if "keops" in backend: x = LazyTensor(x) y = LazyTensor(y) Dxy = ((x - y).square()).sum(dim=2) Kxy = (-Dxy).exp() if "keops" in backend: Kxy.__matmul__(b, sum_scheme=sum_scheme, out=out) else: out = Kxy @ b if device_id != "cpu": torch.cuda.synchronize() # print("out:",out) return out out = [] for backend in ["keops", "torch"]: out.append(fun(x, y, b, backend, out=a).squeeze()) def test_lazytensor_gaussian_inplace(): assert torch.allclose(out[0], out[1])
keops-main
pykeops/pykeops/test/test_lazytensor_gaussian_inplace.py
import torch

from pykeops.common.parse_type import get_type


def make_odd_cat(y):
    bdims = y.shape[:-2]
    N, D = y.shape[-2:]

    if N % 2 == 0:
        ycut = y[..., :-1, :].view(bdims + (N - 1, D))
        yend = y[..., -1, :].view(bdims + (1, D))
        y = torch.cat((y, yend, ycut), dim=-2)
    else:
        y = torch.cat((y, y), dim=-2)
    return y, N


def make_even_size(x):
    bdims = x.shape[:-2]
    M, D = x.shape[-2:]
    if M % 2 == 1:
        xend = x[..., -1, :].view(bdims + (1, D))  # used as dummy row to insert into x
        x = torch.cat((x, xend), dim=-2)
        tag_dummy = True
    else:
        tag_dummy = False
    return x, tag_dummy


def half2half2(x):
    bdims = x.shape[:-2]
    M, D = x.shape[-2:]
    return (
        x.view(bdims + (M // 2, 2, D))
        .transpose(-1, -2)
        .contiguous()
        .view(bdims + (M, D))
    )


def half22half(x):
    bdims = x.shape[:-2]
    M, D = x.shape[-2:]
    return (
        x.view(bdims + (M // 2, D, 2))
        .transpose(-1, -2)
        .contiguous()
        .view(bdims + (M, D))
    )


def ranges2half2(ranges, N):
    # we have to convert true indices to half2 indices, and take into account the special
    # concatenate operation done in make_odd_cat, which doubles the number of ranges along j axis.
    ranges_i, slices_i, redranges_j = ranges
    ranges_i[:, 0] = torch.floor(ranges_i[:, 0] / 2.0).int()
    ranges_i[:, 1] = torch.ceil(ranges_i[:, 1] / 2.0).int()
    slices_i = torch.cat((slices_i, slices_i + redranges_j.shape[0]))
    redranges_j_block1 = torch.zeros(redranges_j.shape)
    redranges_j_block1[:, 0] = torch.floor(redranges_j[:, 0] / 2.0).int()
    redranges_j_block1[:, 1] = torch.ceil(redranges_j[:, 1] / 2.0).int()
    redranges_j_block2 = torch.zeros(redranges_j.shape)
    redranges_j_block2[:, 0] = N // 2 + torch.floor((redranges_j[:, 0] + 1) / 2.0).int()
    redranges_j_block2[:, 1] = N // 2 + torch.ceil((redranges_j[:, 1] + 1) / 2.0).int()
    if N % 2 == 0:
        # special treatment in case the last range goes to the end of the array
        if redranges_j[-1, 1] == N:
            redranges_j_block1[-1, 1] += 1
            redranges_j_block2[-1, 1] -= 1
    redranges_j = torch.cat((redranges_j_block1, redranges_j_block2), dim=0)
    return ranges_i, slices_i, redranges_j


def preprocess_half2(args, aliases, axis, ranges, nx, ny):
    # When the dtype is "half", i.e. float16, we need to use special tricks
    # because internally the Cuda code will use half2 data type, i.e.
    # vectors of two float16 scalars. So we need to :
    # - make a distinction between the actual nx and ny sizes of the reduction
    # on the Python side, i.e. for the user, and the sizes in the c++ code
    # which need to be divided by two (modulo the next point...)
    # - make a copy of data for variables corresponding to the axis of reduction,
    # switching the order of the pairs. To understand this, let's consider
    # that we have two variables x_i and y_j, with nx = ny = 2,
    # and we need to sum over the j axis some kernel,
    # i.e. compute out_i = sum_j k(x_i, y_j) for i,j ranging from 0 to 1.
    # After conversion to half2 data type, without any copy, we would get
    # only one half2 for the x_i : X=(x_0,x_1) and one half2
    # for the y_j : Y=(y_0,y_1). The computation of k(X,Y), with
    # the rules of vectorization of Cuda, would compute only the two scalars
    # k(x_0,y_0) and k(x_1,y_1) and store the result as a half2.
    # To get the two other required kernel evaluations k(x_0,y_1) and k(x_1,y_0),
    # we need to create a second half2 Ytilde=(y_1,y_0). The correct
    # computation will then be achieved by computing k(X,Y) + k(X,Ytilde).
# N is the actual size of reduction, we record it for not mixing up things # when we will do the post-process back conversion after reduction N = ny if axis == 1 else nx if ranges: # When using ranges, we need to adapt the ranges to the special copy trick if axis == 1: ranges = ranges2half2(ranges[0:3], ny) + ranges[3:6] else: ranges = ranges[0:3] + ranges2half2(ranges[3:6], nx) newargs = len(aliases) * [None] tag_dummy = False for (var_ind, sig) in enumerate(aliases): _, cat, dim, pos = get_type(sig, position_in_list=var_ind) arg = args[ pos ].data # we don't want to record our cuisine in the Autograd mechanism ! if cat == 2: arg = arg[..., None, :] # (...,D) -> (...,1,D) arg, _ = make_even_size(arg) # (...,1,D) -> (...,2,D) elif cat == axis: arg, Narg = make_odd_cat(arg) N = max(N, Narg) else: arg, tag_dummy = make_even_size(arg) arg = half2half2(arg) if cat == 2: arg = arg.view( tuple(arg.shape[:-2]) + (2 * dim,) ) # (...,2,D) -> (...,2*D) (we "hide" the factor 2 in the dimension...) newargs[pos] = arg return newargs, ranges, tag_dummy, N def postprocess_half2(out, tag_dummy, reduction_op, N): out = half22half(out) if tag_dummy: out = out[..., :-1, :] if reduction_op in ("ArgMin", "ArgMax", "ArgKMin"): outind = out elif reduction_op in ( "Min_ArgMin", "MinArgMin", "Max_ArgMax", "MaxArgMax", "KMinArgKMin", "KMin_ArgKMin", ): outind = out[..., out.shape[-1] // 2 :] else: return out if N % 2 == 0: outind[outind == N] = N - 1 outind[outind > N] -= N + 1 else: outind[outind >= N] -= N return out
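

# A minimal, pure-PyTorch sketch of the packing helpers defined above: it
# exercises make_odd_cat, make_even_size and the half2half2 / half22half
# round trip on tiny float32 tensors, so it runs without a GPU or any
# compiled KeOps kernel. The toy shapes are arbitrary illustration choices.
if __name__ == "__main__":
    y = torch.arange(8.0).view(4, 2)  # toy "j"-variable with 4 rows

    # make_odd_cat appends a reshuffled copy of y and records its true length N,
    # so that each half2 pair also appears with its two entries swapped.
    y_cat, N = make_odd_cat(y)
    print("true length N =", N, "- concatenated shape:", tuple(y_cat.shape))

    # make_even_size pads with a dummy row when the number of rows is odd.
    x = torch.arange(6.0).view(3, 2)
    x_even, tag_dummy = make_even_size(x)
    print("padded shape:", tuple(x_even.shape), "- dummy row added:", tag_dummy)

    # half2half2 interleaves consecutive rows into half2-style pairs and
    # half22half undoes it: the round trip should be exact.
    assert torch.equal(half22half(half2half2(x_even)), x_even)
    print("half2half2 / half22half round trip OK")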
keops-main
pykeops/pykeops/torch/half2_convert.py
import torch from .. import config as pykeopsconfig ########################################################## # Check Pytorch install # is the proper torch version installed ? torch_version_required = "1.3" if torch.__version__ < torch_version_required: raise ImportError( "[pyKeOps]: The pytorch version should be >=" + torch_version_required ) ########################################################## # Get GPU informations pykeopsconfig.gpu_available = torch.cuda.is_available() # use torch to detect gpu ########################################################## # Import pyKeOps routines from .generic.generic_red import Genred from .generic.generic_ops import ( generic_sum, generic_logsumexp, generic_argmin, generic_argkmin, ) from .operations import KernelSolve from .lazytensor.LazyTensor import LazyTensor, ComplexLazyTensor, Vi, Vj, Pm __all__ = sorted( [ "Genred", "generic_sum", "generic_logsumexp", "generic_argmin", "generic_argkmin", "KernelSolve", "LazyTensor", "Vi", "Vj", "Pm", ] )
keops-main
pykeops/pykeops/torch/__init__.py
import torch from pykeops.common.get_options import get_tag_backend from pykeops.common.keops_io import keops_binder from pykeops.common.operations import ConjugateGradientSolver from pykeops.common.parse_type import ( get_type, get_sizes, complete_aliases, get_optional_flags, ) from pykeops.common.utils import axis2cat from pykeops.torch.generic.generic_red import GenredAutograd from pykeops import default_device_id from pykeops.common.utils import pyKeOps_Warning class KernelSolveAutograd(torch.autograd.Function): """ This class is the entry point to pytorch auto grad engine. """ @staticmethod def forward( ctx, formula, aliases, varinvpos, alpha, backend, dtype, device_id_request, eps, ranges, optional_flags, rec_multVar_highdim, nx, ny, *args ): # N.B. when rec_multVar_highdim option is set, it means that formula is of the form "sum(F*b)", where b is a variable # with large dimension. In this case we set option multVar_highdim to allow for the use of the special "final chunk" computation # mode. However, this may not be also true for the gradients of the same formula. In fact only the gradient # with respect to variable b will have the same form. Hence, we save optional_flags current status into ctx, # before adding the multVar_highdim option. ctx.optional_flags = optional_flags.copy() if rec_multVar_highdim: optional_flags["multVar_highdim"] = 1 else: optional_flags["multVar_highdim"] = 0 tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args) # number of batch dimensions # N.B. we assume here that there is at least a cat=0 or cat=1 variable in the formula... nbatchdims = max(len(arg.shape) for arg in args) - 2 use_ranges = nbatchdims > 0 or ranges device_args = args[0].device if tagCPUGPU == 1 & tagHostDevice == 1: for i in range(1, len(args)): if args[i].device.index != device_args.index: raise ValueError( "[KeOps] Input arrays must be all located on the same device." ) if device_id_request == -1: # -1 means auto setting if device_args.index: # means args are on Gpu device_id_request = device_args.index else: device_id_request = default_device_id if tagCPUGPU == 1 else -1 else: if device_args.index: if device_args.index != device_id_request: raise ValueError( "[KeOps] Gpu device id of arrays is different from device id requested for computation." ) myconv = keops_binder["nvrtc" if tagCPUGPU else "cpp"]( tagCPUGPU, tag1D2D, tagHostDevice, use_ranges, device_id_request, formula, aliases, len(args), dtype, "torch", optional_flags, ).import_module() # Context variables: save everything to compute the gradient: ctx.formula = formula ctx.aliases = aliases ctx.varinvpos = varinvpos ctx.alpha = alpha ctx.backend = backend ctx.dtype = dtype ctx.device_id_request = device_id_request ctx.eps = eps ctx.nx = nx ctx.ny = ny ctx.myconv = myconv ctx.ranges = ranges ctx.rec_multVar_highdim = rec_multVar_highdim ctx.optional_flags = optional_flags varinv = args[varinvpos] ctx.varinvpos = varinvpos def linop(var): newargs = args[:varinvpos] + (var,) + args[varinvpos + 1 :] res = myconv.genred_pytorch( device_args, ranges, nx, ny, nbatchdims, None, *newargs ) if alpha: res += alpha * var return res global copy result = ConjugateGradientSolver("torch", linop, varinv.data, eps) # relying on the 'ctx.saved_variables' attribute is necessary if you want to be able to differentiate the output # of the backward once again. It helps pytorch to keep track of 'who is who'. 
ctx.save_for_backward(*args, result) return result @staticmethod def backward(ctx, G): formula = ctx.formula aliases = ctx.aliases varinvpos = ctx.varinvpos backend = ctx.backend alpha = ctx.alpha dtype = ctx.dtype device_id_request = ctx.device_id_request eps = ctx.eps nx = ctx.nx ny = ctx.ny myconv = ctx.myconv ranges = ctx.ranges optional_flags = ctx.optional_flags rec_multVar_highdim = ctx.rec_multVar_highdim args = ctx.saved_tensors[:-1] # Unwrap the saved variables nargs = len(args) result = ctx.saved_tensors[-1] # If formula takes 5 variables (numbered from 0 to 4), then the gradient # wrt. the output, G, should be given as a 6-th variable (numbered 5), # with the same dim-cat as the formula's output. eta = ( "Var(" + str(nargs) + "," + str(myconv.dimout) + "," + str(myconv.tagIJ) + ")" ) # there is also a new variable for the formula's output resvar = ( "Var(" + str(nargs + 1) + "," + str(myconv.dimout) + "," + str(myconv.tagIJ) + ")" ) newargs = args[:varinvpos] + (G,) + args[varinvpos + 1 :] KinvG = KernelSolveAutograd.apply( formula, aliases, varinvpos, alpha, backend, dtype, device_id_request, eps, ranges, optional_flags, rec_multVar_highdim, nx, ny, *newargs ) grads = [] # list of gradients wrt. args; for (var_ind, sig) in enumerate(aliases): # Run through the arguments # If the current gradient is to be discarded immediatly... if not ctx.needs_input_grad[ var_ind + 13 ]: # because of (formula, aliases, varinvpos, alpha, backend, dtype, device_id, eps, ranges, optional_flags, rec_multVar_highdim, nx, ny) grads.append(None) # Don't waste time computing it. else: # Otherwise, the current gradient is really needed by the user: if var_ind == varinvpos: grads.append(KinvG) else: # adding new aliases is way too dangerous if we want to compute # second derivatives, etc. So we make explicit references to Var<ind,dim,cat> instead. # New here (Joan) : we still add the new variables to the list of "aliases" (without giving new aliases for them) # these will not be used in the C++ code, # but are useful to keep track of the actual variables used in the formula _, cat, dim, pos = get_type(sig, position_in_list=var_ind) var = "Var(" + str(pos) + "," + str(dim) + "," + str(cat) + ")" # V formula_g = ( "Grad_WithSavedForward(" + formula + ", " + var + ", " + eta + ", " + resvar + ")" ) # Grad<F,V,G,R> aliases_g = aliases + [eta, resvar] args_g = ( args[:varinvpos] + (result,) + args[varinvpos + 1 :] + (-KinvG,) + (result,) ) # Don't forget the gradient to backprop ! # N.B.: if I understand PyTorch's doc, we should redefine this function every time we use it? genconv = GenredAutograd.apply if ( cat == 2 ): # we're referring to a parameter, so we'll have to sum both wrt 'i' and 'j' # WARNING !! : here we rely on the implementation of DiffT in files in folder keopscore/core/formulas/reductions # if tagI==cat of V is 2, then reduction is done wrt j, so we need to further sum output wrt i grad = genconv( formula_g, aliases_g, backend, dtype, device_id_request, ranges, optional_flags, None, nx, ny, None, *args_g ) # Then, sum 'grad' wrt 'i' : # I think that '.sum''s backward introduces non-contiguous arrays, # and is thus non-compatible with GenredAutograd: grad = grad.sum(0) # We replace it with a 'handmade hack' : grad = torch.ones(1, grad.shape[0]).type_as(grad.data) @ grad grad = grad.view(-1) else: grad = genconv( formula_g, aliases_g, backend, dtype, device_id_request, ranges, optional_flags, None, nx, ny, None, *args_g ) grads.append(grad) # Grads wrt. 
formula, aliases, varinvpos, alpha, backend, dtype, device_id_request, eps, ranges, optional_flags, rec_multVar_highdim, nx, ny, *args return ( None, None, None, None, None, None, None, None, None, None, None, None, None, *grads, ) class KernelSolve: r""" Creates a new conjugate gradient solver. Supporting the same :ref:`generic syntax <part.generic_formulas>` as :class:`torch.Genred <pykeops.torch.Genred>`, this module allows you to solve generic optimization problems of the form: .. math:: & & a^{\star} & =\operatorname*{argmin}_a \tfrac 1 2 \langle a,( \alpha \operatorname{Id}+K_{xx}) a\rangle - \langle a,b \rangle, \\\\ &\text{i.e.}\quad & a^{\star} & = (\alpha \operatorname{Id} + K_{xx})^{-1} b, where :math:`K_{xx}` is a **symmetric**, **positive** definite **linear** operator and :math:`\alpha` is a **nonnegative regularization** parameter. Note: :class:`KernelSolve` is fully compatible with PyTorch's :mod:`autograd` engine: you can **backprop** through the KernelSolve :meth:`__call__` just as if it was a vanilla PyTorch operation. Example: >>> formula = "Exp(-Norm2(x - y)) * a" # Exponential kernel >>> aliases = ["x = Vi(3)", # 1st input: target points, one dim-3 vector per line ... "y = Vj(3)", # 2nd input: source points, one dim-3 vector per column ... "a = Vj(2)"] # 3rd input: source signal, one dim-2 vector per column >>> K = Genred(formula, aliases, axis = 1) # Reduce formula along the lines of the kernel matrix >>> K_inv = KernelSolve(formula, aliases, "a", # The formula above is linear wrt. 'a' ... axis = 1) >>> # Generate some random data: >>> x = torch.randn(10000, 3, requires_grad=True).cuda() # Sampling locations >>> b = torch.randn(10000, 2).cuda() # Random observed signal >>> a = K_inv(x, x, b, alpha = .1) # Linear solve: a_i = (.1*Id + K(x,x)) \ b >>> print(a.shape) torch.Size([10000, 2]) >>> # Mean squared error: >>> print( ((( .1 * a + K(x,x,a) - b)**2 ).sqrt().sum() / len(x) ).item() ) 0.0002317614998901263 >>> [g_x] = torch.autograd.grad((a ** 2).sum(), [x]) # KernelSolve supports autograd! >>> print(g_x.shape) torch.Size([10000, 3]) """ def __init__( self, formula, aliases, varinvalias, axis=0, dtype_acc="auto", use_double_acc=False, sum_scheme="auto", enable_chunks=True, rec_multVar_highdim=None, dtype=None, cuda_type=None, ): r""" Instantiate a new KernelSolve operation. Note: :class:`KernelSolve` relies on CUDA kernels that are compiled on-the-fly and stored in a :ref:`cache directory <part.cache>` as shared libraries (".so" files) for later use. Args: formula (string): The scalar- or vector-valued expression that should be computed and reduced. The correct syntax is described in the :doc:`documentation <../../Genred>`, using appropriate :doc:`mathematical operations <../../../api/math-operations>`. aliases (list of strings): A list of identifiers of the form ``"AL = TYPE(DIM)"`` that specify the categories and dimensions of the input variables. Here: - ``AL`` is an alphanumerical alias, used in the **formula**. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0. - ``Vj``: indexation by :math:`j` along axis 1. - ``Pm``: no indexation, the input tensor is a *vector* and not a 2d array. - ``DIM`` is an integer, the dimension of the current variable. As described below, :meth:`__call__` will expect input Tensors whose shape are compatible with **aliases**. varinvalias (string): The alphanumerical **alias** of the variable with respect to which we shall perform our conjugate gradient descent. 
**formula** is supposed to be linear with respect to **varinvalias**, but may be more sophisticated than a mere ``"K(x,y) * {varinvalias}"``. Keyword Args: alpha (float, default = 1e-10): Non-negative **ridge regularization** parameter, added to the diagonal of the Kernel matrix :math:`K_{xx}`. axis (int, default = 0): Specifies the dimension of the kernel matrix :math:`K_{x_ix_j}` that is reduced by our routine. The supported values are: - **axis** = 0: reduction with respect to :math:`i`, outputs a ``Vj`` or ":math:`j`" variable. - **axis** = 1: reduction with respect to :math:`j`, outputs a ``Vi`` or ":math:`i`" variable. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64". use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. Default value "auto" will set this option to "block_red". Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. """ if dtype: pyKeOps_Warning( "keyword argument dtype in Genred is deprecated ; argument is ignored." ) if cuda_type: pyKeOps_Warning( "keyword argument cuda_type in Genred is deprecated ; argument is ignored." ) self.reduction_op = "Sum" self.optional_flags = get_optional_flags( self.reduction_op, dtype_acc, use_double_acc, sum_scheme, enable_chunks, ) self.formula = ( self.reduction_op + "_Reduction(" + formula + "," + str(axis2cat(axis)) + ")" ) self.aliases = complete_aliases( formula, list(aliases) ) # just in case the user provided a tuple if varinvalias[:4] == "Var(": # varinv is given directly as Var(*,*,*) so we just have to read the index varinvpos = int(varinvalias[4 : varinvalias.find(",")]) else: # we need to recover index from alias tmp = self.aliases.copy() for (i, s) in enumerate(tmp): tmp[i] = s[: s.find("=")].strip() varinvpos = tmp.index(varinvalias) self.varinvpos = varinvpos self.rec_multVar_highdim = rec_multVar_highdim self.axis = axis def __call__( self, *args, backend="auto", device_id=-1, alpha=1e-10, eps=1e-6, ranges=None ): r""" Apply the routine on arbitrary torch Tensors. Warning: Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}` for :math:`i\in[0,M)`), KeOps expects inputs to be formatted as 2d Tensors of size ``(M,dim)``. 
In practice, ``a.view(-1,1)`` should be used to turn a vector of weights into a *list of scalar values*. Args: *args (2d Tensors (variables ``Vi(..)``, ``Vj(..)``) and 1d Tensors (parameters ``Pm(..)``)): The input numerical arrays, which should all have the same ``dtype``, be **contiguous** and be stored on the **same device**. KeOps expects one array per alias, with the following compatibility rules: - All ``Vi(Dim_k)`` variables are encoded as **2d-tensors** with ``Dim_k`` columns and the same number of lines :math:`M`. - All ``Vj(Dim_k)`` variables are encoded as **2d-tensors** with ``Dim_k`` columns and the same number of lines :math:`N`. - All ``Pm(Dim_k)`` variables are encoded as **1d-tensors** (vectors) of size ``Dim_k``. Keyword Args: backend (string): Specifies the map-reduce scheme, as detailed in the documentation of the :class:`torch.Genred <pykeops.torch.Genred>` module. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of IntTensors, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` with *Mc clusters along axis 0* and *Nc clusters along axis 1*, as detailed in the documentation of the :class:`torch.Genred <pykeops.torch.Genred>` module. If **None** (default), we simply use a **dense Kernel matrix** as we loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. Returns: (M,D) or (N,D) Tensor: The solution of the optimization problem, stored on the same device as the input Tensors. The output of a :class:`KernelSolve` call is always a **2d-tensor** with :math:`M` or :math:`N` lines (if **axis** = 1 or **axis** = 0, respectively) and a number of columns that is inferred from the **formula**. """ dtype = args[0].dtype.__str__().split(".")[1] nx, ny = get_sizes(self.aliases, *args) return KernelSolveAutograd.apply( self.formula, self.aliases, self.varinvpos, alpha, backend, dtype, device_id, eps, ranges, self.optional_flags, self.rec_multVar_highdim, nx, ny, *args )
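

# A minimal end-to-end sketch of the solver defined above, mirroring the
# docstring example on small CPU tensors. It assumes a working KeOps
# installation (the reduction is compiled on the fly the first time it is
# called); the sizes and the alpha value below are arbitrary illustration
# choices, not a benchmark.
if __name__ == "__main__":
    from pykeops.torch import Genred

    formula = "Exp(-Norm2(x - y)) * a"  # exponential kernel applied to 'a'
    aliases = [
        "x = Vi(3)",  # target points, one dim-3 vector per line
        "y = Vj(3)",  # source points, one dim-3 vector per line
        "a = Vj(2)",  # source signal, one dim-2 vector per line
    ]

    K = Genred(formula, aliases, axis=1)  # dense kernel-vector product
    K_inv = KernelSolve(formula, aliases, "a", axis=1)  # linear solve wrt. 'a'

    x = torch.randn(500, 3)
    b = torch.randn(500, 2)

    a = K_inv(x, x, b, alpha=0.1)  # a = (.1 * Id + K(x, x)) \ b
    residual = (0.1 * a + K(x, x, a) - b).norm() / b.norm()
    print("Relative residual of the linear solve:", residual.item())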
keops-main
pykeops/pykeops/torch/operations.py
import torch from pykeops.torch import Genred, KernelSolve from pykeops.torch.cluster import swap_axes as torch_swap_axes # from pykeops.torch.generic.generic_red import GenredLowlevel def is_on_device(x): return x.is_cuda class torchtools: copy = torch.clone exp = torch.exp log = torch.log norm = torch.norm swap_axes = torch_swap_axes Genred = Genred KernelSolve = KernelSolve arraytype = torch.Tensor float_types = [float] # GenredLowlevel = GenredLowlevel @staticmethod def eq(x, y): return torch.eq(x, y) @staticmethod def transpose(x): return x.t() @staticmethod def permute(x, *args): return x.permute(*args) @staticmethod def contiguous(x): return x.contiguous() @staticmethod def solve(A, b): return torch.solve(b, A)[0].contiguous() @staticmethod def arraysum(x, axis=None): return x.sum() if axis is None else x.sum(dim=axis) @staticmethod def long(x): return x.long() @staticmethod def size(x): return x.numel() @staticmethod def tile(*args): return torch.Tensor.repeat(*args) @staticmethod def numpy(x): return x.detach().cpu().numpy() @staticmethod def view(x, s): return x.view(s) @staticmethod def is_tensor(x): return isinstance(x, torch.Tensor) @staticmethod def dtype(x): if hasattr(x, "dtype"): return x.dtype else: return type(x) @staticmethod def dtype(x): if hasattr(x, "dtype"): return x.dtype else: return type(x) @staticmethod def detect_complex(x): if type(x) == list: return any(type(v) == complex for v in x) elif type(x) == torch.Tensor: return torch.is_complex(x) else: return type(x) == complex @staticmethod def view_as_complex(x): sh = list(x.shape) sh[-1] //= 2 sh += [2] x = x.view(sh) return torch.view_as_complex(x) @staticmethod def view_as_real(x): sh = list(x.shape) sh[-1] *= 2 return torch.view_as_real(x).view(sh) @staticmethod def dtypename(dtype): if dtype == torch.float32: return "float32" elif dtype == torch.float64: return "float64" elif dtype == torch.float16: return "float16" elif dtype == int: return int elif dtype == list: return "float32" else: raise ValueError( "[KeOps] {} data type incompatible with KeOps.".format(dtype) ) @staticmethod def rand(m, n, dtype, device): return torch.rand(m, n, dtype=dtype, device=device) @staticmethod def randn(m, n, dtype, device): return torch.randn(m, n, dtype=dtype, device=device) @staticmethod def zeros( shape, dtype, device, requires_grad=False, ): return torch.zeros( *shape, dtype=dtype, device=device, requires_grad=requires_grad ) @staticmethod def empty( shape, dtype, device, requires_grad=False, ): return torch.empty( *shape, dtype=dtype, device=device, requires_grad=requires_grad ) @staticmethod def eye(n, dtype, device): return torch.eye(n, dtype=dtype, device=device) @staticmethod def array(x, dtype, device): if dtype == "float32": dtype = torch.float32 elif dtype == "float64": dtype = torch.float64 elif dtype == "float16": dtype = torch.float16 elif dtype == "int32": dtype = torch.int32 else: raise ValueError( "[KeOps] data type " + dtype + " is incompatible with KeOps." 
) return torch.tensor(x, dtype=dtype, device=device) @staticmethod def device(x): if isinstance(x, torch.Tensor): return x.device else: return None @staticmethod def get_pointer(x): return x.data_ptr() @staticmethod def device_type_index(x): if isinstance(x, torch.Tensor): dev = x.device return dev.type, dev.index else: return None, None @staticmethod def pointer(x): return x.data.data_ptr() def squared_distances(x, y): x_norm = (x**2).sum(1).reshape(-1, 1) y_norm = (y**2).sum(1).reshape(1, -1) dist = x_norm + y_norm - 2.0 * torch.matmul(x, torch.transpose(y, 0, 1)) return dist def torch_kernel(x, y, s, kernel): sq = squared_distances(x, y) _kernel = { "gaussian": lambda _sq, _s: torch.exp(-_sq / (_s * _s)), "laplacian": lambda _sq, _s: torch.exp(-torch.sqrt(_sq) / _s), "cauchy": lambda _sq, _s: 1.0 / (1 + _sq / (_s * _s)), "inverse_multiquadric": lambda _sq, _s: torch.rsqrt(1 + _sq / (_s * _s)), } return _kernel[kernel](sq, s)
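

# Small self-contained check of the helpers above (pure PyTorch, no KeOps
# compilation involved): the "gaussian" entry of torch_kernel should match
# an explicit exp(-|x - y|^2 / s^2) computation. The sizes and the bandwidth
# below are arbitrary values chosen for illustration only.
if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.randn(5, 3)
    y = torch.randn(4, 3)
    s = 0.7  # kernel bandwidth

    K = torch_kernel(x, y, s, "gaussian")  # (5, 4) kernel matrix
    K_explicit = torch.exp(-((x[:, None, :] - y[None, :, :]) ** 2).sum(-1) / s**2)
    assert torch.allclose(K, K_explicit, atol=1e-4)
    print("torch_kernel('gaussian') matches the explicit formula.")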
keops-main
pykeops/pykeops/torch/utils.py
import torch

from pykeops.common.utils import pyKeOps_Message

formula = "SqNorm2(x - y)"
var = ["x = Vi(3)", "y = Vj(3)"]
expected_res = [63.0, 90.0]


def test_torch_bindings():
    """
    This function tries to compile a simple KeOps formula using the PyTorch binder.
    """
    x = torch.arange(1, 10, dtype=torch.float32).view(-1, 3)
    y = torch.arange(3, 9, dtype=torch.float32).view(-1, 3)

    import pykeops.torch as pktorch

    my_conv = pktorch.Genred(formula, var)
    if torch.allclose(
        my_conv(x, y).view(-1), torch.tensor(expected_res).type(torch.float32)
    ):
        pyKeOps_Message("pyKeOps with torch bindings is working!", use_tag=False)
    else:
        pyKeOps_Message("outputs wrong values...", use_tag=False)
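

# Running this file directly performs the same sanity check as the test
# collection above (this assumes a functional KeOps installation, since the
# formula is compiled on first use).
if __name__ == "__main__":
    test_torch_bindings()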
keops-main
pykeops/pykeops/torch/test_install.py
import torch def from_matrix(ranges_i, ranges_j, keep): r"""Turns a boolean matrix into a KeOps-friendly **ranges** argument. This routine is a helper for the **block-sparse** reduction mode of KeOps, allowing you to turn clustering information (**ranges_i**, **ranges_j**) and a cluster-to-cluster boolean mask (**keep**) into integer tensors of indices that can be used to schedule the KeOps routines. Suppose that you're working with variables :math:`x_i` (:math:`i \in [0,10^6)`), :math:`y_j` (:math:`j \in [0,10^7)`), and that you want to compute a KeOps reduction over indices :math:`i` or :math:`j`: Instead of performing the full kernel dot product (:math:`10^6 \cdot 10^7 = 10^{13}` operations!), you may want to restrict yourself to interactions between points :math:`x_i` and :math:`y_j` that are "close" to each other. With KeOps, the simplest way of doing so is to: 1. Compute cluster labels for the :math:`x_i`'s and :math:`y_j`'s, using e.g. the :func:`grid_cluster` method. 2. Compute the ranges (**ranges_i**, **ranges_j**) and centroids associated to each cluster, using e.g. the :func:`cluster_ranges_centroids` method. 3. Sort the tensors ``x_i`` and ``y_j`` with :func:`sort_clusters` to make sure that the clusters are stored contiguously in memory (this step is **critical** for performance on GPUs). At this point: - the :math:`k`-th cluster of :math:`x_i`'s is given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, for :math:`k \in [0,M)`, - the :math:`\ell`-th cluster of :math:`y_j`'s is given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, for :math:`\ell \in [0,N)`. 4. Compute the :math:`(M,N)` matrix **dist** of pairwise distances between cluster centroids. 5. Apply a threshold on **dist** to generate a boolean matrix ``keep = dist < threshold``. 6. Define a KeOps reduction ``my_genred = Genred(..., axis = 0 or 1)``, as usual. 7. Compute the block-sparse reduction through ``result = my_genred(x_i, y_j, ranges = from_matrix(ranges_i,ranges_j,keep) )`` :func:`from_matrix` is thus the routine that turns a **high-level description** of your block-sparse computation (cluster ranges + boolean matrix) into a set of **integer tensors** (the **ranges** optional argument), used by KeOps to schedule computations on the GPU. Args: ranges_i ((M,2) IntTensor): List of :math:`[\text{start}_k,\text{end}_k)` indices. For :math:`k \in [0,M)`, the :math:`k`-th cluster of ":math:`i`" variables is given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, etc. ranges_j ((N,2) IntTensor): List of :math:`[\text{start}_\ell,\text{end}_\ell)` indices. For :math:`\ell \in [0,N)`, the :math:`\ell`-th cluster of ":math:`j`" variables is given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, etc. keep ((M,N) BoolTensor): If the output ``ranges`` of :func:`from_matrix` is used in a KeOps reduction, we will only compute and reduce the terms associated to pairs of "points" :math:`x_i`, :math:`y_j` in clusters :math:`k` and :math:`\ell` if ``keep[k,l] == 1``. Returns: A 6-uple of LongTensors that can be used as an optional **ranges** argument of :class:`torch.Genred <pykeops.torch.Genred>`. See the documentation of :class:`torch.Genred <pykeops.torch.Genred>` for reference. 
Example: >>> r_i = torch.IntTensor( [ [2,5], [7,12] ] ) # 2 clusters: X[0] = x_i[2:5], X[1] = x_i[7:12] >>> r_j = torch.IntTensor( [ [1,4], [4,9], [20,30] ] ) # 3 clusters: Y[0] = y_j[1:4], Y[1] = y_j[4:9], Y[2] = y_j[20:30] >>> x,y = torch.Tensor([1., 0.]), torch.Tensor([1.5, .5, 2.5]) # dummy "centroids" >>> dist = (x[:,None] - y[None,:])**2 >>> keep = (dist <= 1) # (2,3) matrix >>> print(keep) tensor([[1, 1, 0], [0, 1, 0]], dtype=torch.uint8) --> X[0] interacts with Y[0] and Y[1], X[1] interacts with Y[1] >>> (ranges_i,slices_i,redranges_j, ranges_j,slices_j,redranges_i) = from_matrix(r_i,r_j,keep) --> (ranges_i,slices_i,redranges_j) will be used for reductions with respect to "j" (axis=1) --> (ranges_j,slices_j,redranges_i) will be used for reductions with respect to "i" (axis=0) Information relevant if **axis** = 1: >>> print(ranges_i) # = r_i tensor([[ 2, 5], [ 7, 12]], dtype=torch.int32) --> Two "target" clusters in a reduction wrt. j >>> print(slices_i) tensor([2, 3], dtype=torch.int32) --> X[0] is associated to redranges_j[0:2] --> X[1] is associated to redranges_j[2:3] >>> print(redranges_j) tensor([[1, 4], [4, 9], [4, 9]], dtype=torch.int32) --> For X[0], i in [2,3,4], we'll reduce over j in [1,2,3] and [4,5,6,7,8] --> For X[1], i in [7,8,9,10,11], we'll reduce over j in [4,5,6,7,8] Information relevant if **axis** = 0: >>> print(ranges_j) tensor([[ 1, 4], [ 4, 9], [20, 30]], dtype=torch.int32) --> Three "target" clusters in a reduction wrt. i >>> print(slices_j) tensor([1, 3, 3], dtype=torch.int32) --> Y[0] is associated to redranges_i[0:1] --> Y[1] is associated to redranges_i[1:3] --> Y[2] is associated to redranges_i[3:3] = no one... >>> print(redranges_i) tensor([[ 2, 5], [ 2, 5], [ 7, 12]], dtype=torch.int32) --> For Y[0], j in [1,2,3], we'll reduce over i in [2,3,4] --> For Y[1], j in [4,5,6,7,8], we'll reduce over i in [2,3,4] and [7,8,9,10,11] --> For Y[2], j in [20,21,...,29], there is no reduction to be done """ I, J = torch.meshgrid( ( torch.arange(0, keep.shape[0], device=keep.device), torch.arange(0, keep.shape[1], device=keep.device), ) ) redranges_i = ranges_i[ I.t()[keep.t()] ] # Use PyTorch indexing to "stack" copies of ranges_i[...] redranges_j = ranges_j[J[keep]] slices_i = ( keep.sum(1).cumsum(0).int() ) # slice indices in the "stacked" array redranges_j slices_j = ( keep.sum(0).cumsum(0).int() ) # slice indices in the "stacked" array redranges_i return (ranges_i, slices_i, redranges_j, ranges_j, slices_j, redranges_i) if __name__ == "__main__": r_i = torch.IntTensor([[2, 5], [7, 12]]) r_j = torch.IntTensor([[1, 4], [4, 9], [20, 30]]) x, y = torch.Tensor([0.0, 1.0]), torch.Tensor([0.0, 0.7, 2.0]) dist = (x[:, None] - y[None, :]) ** 2 keep = dist <= 0.8 print(keep) for item in from_matrix(r_i, r_j, keep): print(item)
keops-main
pykeops/pykeops/torch/cluster/matrix.py
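The file above already illustrates from_matrix on hand-made clusters; the short sketch below additionally feeds its 6-tuple output to a reduction through the ranges keyword of Genred, which is documented further down in this dump. The Gaussian formula, the tensor sizes and the keep mask are illustrative assumptions, not part of the original file, and pykeops is assumed to be installed.

import torch
from pykeops.torch import Genred
from pykeops.torch.cluster import from_matrix

# Hand-made clusters: x rows [2,5) and [7,12); y rows [1,4), [4,9), [20,30).
r_i = torch.IntTensor([[2, 5], [7, 12]])
r_j = torch.IntTensor([[1, 4], [4, 9], [20, 30]])
keep = torch.BoolTensor([[1, 1, 0], [0, 1, 0]])  # which cluster pairs interact

ranges_ij = from_matrix(r_i, r_j, keep)

# A Gaussian convolution, reduced over j (axis=1), restricted to the kept blocks:
gauss = Genred("Exp(-SqDist(x, y)) * b",
               ["x = Vi(3)", "y = Vj(3)", "b = Vj(1)"],
               reduction_op="Sum", axis=1)

x = torch.randn(12, 3)   # must cover the row indices used in r_i
y = torch.randn(30, 3)   # must cover the row indices used in r_j
b = torch.randn(30, 1)

a = gauss(x, y, b, ranges=ranges_ij)  # block-sparse reduction, output of shape (12, 1)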
import torch


def grid_cluster(x, size):
    r"""Simplistic clustering algorithm which distributes points into cubic bins.

    Args:
        x ((M,D) Tensor): List of points :math:`x_i \in \mathbb{R}^D`.
        size (float or (D,) Tensor): Dimensions of the cubic cells ("voxels").

    Returns:
        (M,) IntTensor:
            Vector of integer **labels**. Two points ``x[i]`` and ``x[j]`` are
            in the same cluster if and only if ``labels[i] == labels[j]``.
            Labels are sorted in a compact range :math:`[0,C)`,
            where :math:`C` is the number of non-empty cubic cells.

    Example:
        >>> x = torch.Tensor([ [0.], [.1], [.9], [.05], [.5] ])  # points in the unit interval
        >>> labels = grid_cluster(x, .2)  # bins of size .2
        >>> print( labels )
        tensor([0, 0, 2, 0, 1], dtype=torch.int32)
    """
    with torch.no_grad():
        # Quantize the points' positions
        if x.shape[1] == 1:
            weights = torch.IntTensor(
                [1],
            ).to(x.device)
        elif x.shape[1] == 2:
            weights = torch.IntTensor(
                [2**10, 1],
            ).to(x.device)
        elif x.shape[1] == 3:
            weights = torch.IntTensor([2**20, 2**10, 1]).to(x.device)
        else:
            raise NotImplementedError()
        x_ = (x / size).floor().int()
        x_ *= weights
        lab = x_.sum(1)  # labels
        lab = lab - lab.min()

        # Replace arbitrary labels with unique identifiers in a compact arange
        u_lab = torch.unique(lab).sort()[0]
        N_lab = len(u_lab)
        foo = torch.empty(u_lab.max() + 1, dtype=torch.int32, device=x.device)
        foo[u_lab] = torch.arange(N_lab, dtype=torch.int32, device=x.device)
        lab = foo[lab]

    return lab
keops-main
pykeops/pykeops/torch/cluster/grid_cluster.py
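A minimal usage sketch for grid_cluster, assuming pykeops is installed; the 10,000 random 2D points and the 0.1 voxel size are arbitrary choices made for illustration.

import torch
from pykeops.torch.cluster import grid_cluster

x = torch.rand(10000, 2)           # toy data: 10,000 points in the unit square
labels = grid_cluster(x, 0.1)      # cubic bins of side 0.1

n_clusters = int(labels.max()) + 1       # labels live in the compact range [0, C)
sizes = torch.bincount(labels.long())    # number of points per non-empty cell
print(n_clusters, int(sizes.sum()))      # sizes.sum() == 10000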
from .grid_cluster import grid_cluster
from .matrix import from_matrix
from .utils import (
    sort_clusters,
    cluster_ranges,
    cluster_centroids,
    cluster_ranges_centroids,
    swap_axes,
)

# N.B.: the order is important for the autodoc in sphinx!
__all__ = sorted(
    [
        "grid_cluster",
        "from_matrix",
        "sort_clusters",
        "cluster_ranges",
        "cluster_centroids",
        "cluster_ranges_centroids",
        "swap_axes",
    ]
)
keops-main
pykeops/pykeops/torch/cluster/__init__.py
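The routines re-exported by this __init__.py are typically chained in the order sketched below, following the recipe spelled out in the from_matrix docstring. This is a minimal sketch under arbitrary assumptions: 2D random data, a 0.05 voxel size, a 0.15 centroid threshold and a Gaussian Genred formula.

import torch
from pykeops.torch import Genred
from pykeops.torch.cluster import (
    grid_cluster, cluster_ranges_centroids, sort_clusters, from_matrix,
)

x = torch.rand(5000, 2)
y = torch.rand(8000, 2)

# 1. Cluster labels:
x_lab = grid_cluster(x, 0.05)
y_lab = grid_cluster(y, 0.05)

# 2. Ranges and centroids per cluster:
x_ranges, x_centroids, _ = cluster_ranges_centroids(x, x_lab)
y_ranges, y_centroids, _ = cluster_ranges_centroids(y, y_lab)

# 3. Make each cluster contiguous in memory:
x, x_lab = sort_clusters(x, x_lab)
y, y_lab = sort_clusters(y, y_lab)

# 4-5. Keep only pairs of clusters whose centroids are close to each other:
D = ((x_centroids[:, None, :] - y_centroids[None, :, :]) ** 2).sum(-1)
keep = D < 0.15 ** 2

# 6-7. Block-sparse Gaussian convolution:
gauss = Genred("Exp(-SqDist(x, y)) * b",
               ["x = Vi(2)", "y = Vj(2)", "b = Vj(1)"],
               reduction_op="Sum", axis=1)
b = torch.randn(8000, 1)
a = gauss(x, y, b, ranges=from_matrix(x_ranges, y_ranges, keep))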
import torch def sort_clusters(x, lab): r"""Sorts a list of points and labels to make sure that the clusters are contiguous in memory. On the GPU, **contiguous memory accesses** are key to high performances. By making sure that points in the same cluster are stored next to each other in memory, this pre-processing routine allows KeOps to compute block-sparse reductions with maximum efficiency. Warning: For unknown reasons, ``torch.bincount`` is much more efficient on *unsorted* arrays of labels... so make sure not to call ``bincount`` on the output of this routine! Args: x ((M,D) Tensor or tuple/list of (M,..) Tensors): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) IntTensor): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Returns: (M,D) Tensor or tuple/list of (M,..) Tensors, (M,) IntTensor: Sorted **point cloud(s)** and **vector of labels**. Example: >>> x = torch.Tensor( [ [0.], [5.], [.4], [.3], [2.] ]) >>> lab = torch.IntTensor([ 0, 2, 0, 0, 1 ]) >>> x_sorted, lab_sorted = sort_clusters(x, lab) >>> print(x_sorted) tensor([[0.0000], [0.4000], [0.3000], [2.0000], [5.0000]]) >>> print(lab_sorted) tensor([0, 0, 0, 1, 2], dtype=torch.int32) """ lab, perm = torch.sort(lab.view(-1)) if type(x) is tuple: x_sorted = tuple(a[perm] for a in x) elif type(x) is list: x_sorted = list(a[perm] for a in x) else: x_sorted = x[perm] return x_sorted, lab def cluster_ranges(lab, Nlab=None): r"""Computes the ``[start,end)`` indices that specify clusters in a sorted point cloud. If **lab** denotes a vector of labels :math:`\ell_i\in[0,C)`, :func:`sort_clusters` allows us to sort our point clouds and make sure that points that share the same label are stored next to each other in memory. :func:`cluster_ranges` is simply there to give you the **slice indices** that correspond to each of those :math:`C` classes. Args: x ((M,D) Tensor): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) IntTensor): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: Nlab ((C,) IntTensor, optional): If you have computed it already, you may specify the number of points per class through this integer vector of length :math:`C`. Returns: (C,2) IntTensor: Stacked array of :math:`[\text{start}_k, \text{end}_k )` indices in :math:`[0,M]`, for :math:`k\in[0,C)`. Example: >>> x = torch.Tensor( [ [0.], [5.], [.4], [.3], [2.] ]) >>> lab = torch.IntTensor([ 0, 2, 0, 0, 1 ]) >>> x_sorted, lab_sorted = sort_clusters(x, lab) >>> print(x_sorted) tensor([[0.0000], [0.4000], [0.3000], [2.0000], [5.0000]]) >>> print(lab_sorted) tensor([0, 0, 0, 1, 2], dtype=torch.int32) >>> ranges_i = cluster_ranges(lab) >>> print( ranges_i ) tensor([[0, 3], [3, 4], [4, 5]], dtype=torch.int32) --> cluster 0 = x_sorted[0:3, :] --> cluster 1 = x_sorted[3:4, :] --> cluster 2 = x_sorted[4:5, :] """ if Nlab is None: Nlab = torch.bincount(lab).float() pivots = torch.cat((torch.Tensor([0.0]).to(Nlab.device), Nlab.cumsum(0))) return torch.stack((pivots[:-1], pivots[1:]), dim=1).int() def cluster_centroids(x, lab, Nlab=None, weights=None, weights_c=None): r"""Computes the (weighted) centroids of classes specified by a vector of labels. If points :math:`x_i \in\mathbb{R}^D` are assigned to :math:`C` different classes by the vector of integer labels :math:`\ell_i \in [0,C)`, this function returns a collection of :math:`C` centroids .. math:: c_k = \frac{\sum_{i, \ell_i = k} w_i\cdot x_i}{\sum_{i, \ell_i=k} w_i}, where the weights :math:`w_i` are set to 1 by default. Args: x ((M,D) Tensor): List of points :math:`x_i \in \mathbb{R}^D`. 
lab ((M,) IntTensor): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: Nlab ((C,) IntTensor): Number of points per class. Recomputed if None. weights ((N,) Tensor): Positive weights :math:`w_i` of each point. weights_c ((C,) Tensor): Total weight of each class. Recomputed if None. Returns: (C,D) Tensor: List of centroids :math:`c_k \in \mathbb{R}^D`. Example: >>> x = torch.Tensor([ [0.], [1.], [4.], [5.], [6.] ]) >>> lab = torch.IntTensor([ 0, 0, 1, 1, 1 ]) >>> weights = torch.Tensor([ .5, .5, 2., 1., 1. ]) >>> centroids = cluster_centroids(x, lab, weights=weights) >>> print(centroids) tensor([[0.5000], [4.7500]]) """ if Nlab is None: Nlab = torch.bincount(lab).float() if weights is not None and weights_c is None: weights_c = torch.bincount(lab, weights=weights).view(-1, 1) c = torch.zeros((len(Nlab), x.shape[1]), dtype=x.dtype, device=x.device) for d in range(x.shape[1]): if weights is None: c[:, d] = torch.bincount(lab, weights=x[:, d]) / Nlab else: c[:, d] = torch.bincount( lab, weights=x[:, d] * weights.view(-1) ) / weights_c.view(-1) return c def cluster_ranges_centroids(x, lab, weights=None, min_weight=1e-9): r"""Computes the cluster indices and centroids of a (weighted) point cloud with labels. If **x** and **lab** encode a cloud of points :math:`x_i\in\mathbb{R}^D` with labels :math:`\ell_i\in[0,C)`, for :math:`i\in[0,M)`, this routine returns: - Ranges :math:`[\text{start}_k,\text{end}_k)` compatible with :func:`sort_clusters` for :math:`k\in[0,C)`. - Centroids :math:`c_k` for each cluster :math:`k`, computed as barycenters using the weights :math:`w_i \in \mathbb{R}_{>0}`: .. math:: c_k = \frac{\sum_{i, \ell_i=k} w_i\cdot \ell_i}{\sum_{i, \ell_i=k} w_i} - Total weights :math:`\sum_{i, \ell_i=k} w_i`, for :math:`k\in[0,C)`. The weights :math:`w_i` can be given through a vector **weights** of size :math:`M`, and are set by default to 1 for all points in the cloud. Args: x ((M,D) Tensor): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) IntTensor): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: weights ((M,) Tensor): Positive weights :math:`w_i` that can be used to compute our barycenters. min_weight (float): For the sake of numerical stability, weights are clamped to be larger or equal to this value. Returns: (C,2) IntTensor, (C,D) Tensor, (C,) Tensor: **ranges** - Stacked array of :math:`[\text{start}_k,\text{end}_k)` indices in :math:`[0,M]`, for :math:`k\in[0,C)`, compatible with the :func:`sort_clusters` routine. **centroids** - List of centroids :math:`c_k \in \mathbb{R}^D`. **weights_c** - Total weight of each cluster. Example: >>> x = torch.Tensor([ [0.], [.5], [1.], [2.], [3.] ]) >>> lab = torch.IntTensor([ 0, 0, 1, 1, 1 ]) >>> ranges, centroids, weights_c = cluster_ranges_centroids(x, lab) >>> print(ranges) tensor([[0, 2], [2, 5]], dtype=torch.int32) --> cluster 0 = x[0:2, :] --> cluster 1 = x[2:5, :] >>> print(centroids) tensor([[0.2500], [2.0000]]) >>> print(weights_c) tensor([2., 3.]) >>> weights = torch.Tensor([ 1., .5, 1., 1., 10. 
]) >>> ranges, centroids, weights_c = cluster_ranges_centroids(x, lab, weights=weights) >>> print(ranges) tensor([[0, 2], [2, 5]], dtype=torch.int32) --> cluster 0 = x[0:2, :] --> cluster 1 = x[2:5, :] >>> print(centroids) tensor([[0.1667], [2.7500]]) >>> print(weights_c) tensor([1.5000, 12.0000]) """ Nlab = torch.bincount(lab).float() if weights is not None: # Remove "zero weights" for the sake of numerical stability: weights = weights.clone() weights[weights <= min_weight] = min_weight w_c = torch.bincount(lab, weights=weights).view(-1) return ( cluster_ranges(lab, Nlab), cluster_centroids(x, lab, Nlab, weights=weights, weights_c=w_c), w_c, ) else: return cluster_ranges(lab, Nlab), cluster_centroids(x, lab, Nlab), Nlab def swap_axes(ranges): r""" Swaps the ":math:`i`" and ":math:`j`" axes of a reduction's optional **ranges** parameter. This function returns **None** if **ranges** is **None**, and swaps the :math:`i` and :math:`j` arrays of indices otherwise. """ if ranges is None: return None else: return (*ranges[3:6], *ranges[0:3])
keops-main
pykeops/pykeops/torch/cluster/utils.py
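swap_axes is the only routine in this file without a doctest; the sketch below, on hand-made toy ranges, shows that it simply exchanges the "i" and "j" halves of a block-sparse ranges tuple, which is what you need when the same cluster mask is reused for a reduction over the other axis.

import torch
from pykeops.torch.cluster import from_matrix, swap_axes

r_i = torch.IntTensor([[0, 3], [3, 5]])
r_j = torch.IntTensor([[0, 4], [4, 7]])
keep = torch.BoolTensor([[1, 0], [1, 1]])

ranges_ij = from_matrix(r_i, r_j, keep)   # (ranges_i, slices_i, redranges_j, ranges_j, slices_j, redranges_i)
ranges_ji = swap_axes(ranges_ij)          # the same mask, seen from the other side

assert all(torch.equal(a, b) for a, b in zip(ranges_ji, ranges_ij[3:] + ranges_ij[:3]))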
keops-main
pykeops/pykeops/torch/lazytensor/__init__.py
import torch

from pykeops.common.lazy_tensor import GenericLazyTensor, ComplexGenericLazyTensor
from pykeops.torch.utils import torchtools


# Convenient aliases:
def Var(x_or_ind, dim=None, cat=None):
    if dim is None:
        # init via data: we assume x_or_ind is data
        return LazyTensor(x_or_ind, axis=cat)
    else:
        # init via symbolic variable given as triplet (ind,dim,cat)
        return LazyTensor((x_or_ind, dim, cat))


def Vi(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 0.
    """
    return Var(x_or_ind, dim, 0)


def Vj(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 1.
    """
    return Var(x_or_ind, dim, 1)


def Pm(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 2.
    """
    return Var(x_or_ind, dim, 2)


class LazyTensor(GenericLazyTensor):
    r"""Symbolic wrapper for PyTorch tensors.

    :class:`LazyTensor` encode numerical arrays through the combination
    of a symbolic, **mathematical formula** and a list of **small data arrays**.
    They can be used to implement efficient algorithms on objects
    that are **easy to define**, but **impossible to store** in memory
    (e.g. the matrix of pairwise distances between two large point clouds).

    :class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors,
    combined using simple mathematical operations and converted
    back to NumPy arrays or PyTorch tensors with efficient reduction routines,
    which outperform standard tensorized implementations by two orders of magnitude.
    """

    def __new__(self, x=None, axis=None, is_complex=False):
        if is_complex or torchtools.detect_complex(x):
            return ComplexLazyTensor(x, axis)
        else:
            return object.__new__(self)

    def __init__(self, x=None, axis=None, is_complex=False):
        super().__init__(x=x, axis=axis)

    def get_tools(self):
        self.tools = torchtools
        self.Genred = torchtools.Genred
        self.KernelSolve = torchtools.KernelSolve

    def lt_constructor(self, x=None, axis=None, is_complex=False):
        return LazyTensor(x=x, axis=axis, is_complex=is_complex)


class ComplexLazyTensor(ComplexGenericLazyTensor):
    r"""Extension of the LazyTensor class for complex operations."""

    def __init__(self, x=None, axis=None):
        super().__init__(x=x, axis=axis)

    def get_tools(self):
        self.tools = torchtools
        self.Genred = torchtools.Genred
        self.KernelSolve = torchtools.KernelSolve

    def lt_constructor(self, x=None, axis=None, is_complex=True):
        return LazyTensor(x=x, axis=axis, is_complex=is_complex)
keops-main
pykeops/pykeops/torch/lazytensor/LazyTensor.py
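A minimal sketch of the symbolic workflow that LazyTensor implements: a Gaussian kernel matrix-vector product whose (1000, 2000) kernel matrix is never stored densely, checked against a plain PyTorch computation on a problem small enough to fit in memory. The sizes and the kernel are arbitrary illustration choices.

import torch
from pykeops.torch import LazyTensor

x = torch.randn(1000, 3)
y = torch.randn(2000, 3)
b = torch.randn(2000, 1)

x_i = LazyTensor(x[:, None, :])      # (1000, 1, 3) symbolic "i" variable
y_j = LazyTensor(y[None, :, :])      # (1, 2000, 3) symbolic "j" variable

D_ij = ((x_i - y_j) ** 2).sum(-1)    # symbolic (1000, 2000) matrix of squared distances
K_ij = (-D_ij).exp()                 # symbolic Gaussian kernel matrix

a = K_ij @ b                         # reduction over j -> dense (1000, 1) tensor

# Dense check on this small problem:
a_dense = (-((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)).exp() @ b
print((a - a_dense).abs().max())     # small float32 round-off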
import torch from pykeops.common.get_options import get_tag_backend from pykeops.common.operations import preprocess, postprocess from pykeops.common.parse_type import ( get_type, get_sizes, complete_aliases, get_optional_flags, ) from pykeops.common.utils import axis2cat from pykeops import default_device_id from pykeops.common.utils import pyKeOps_Warning class GenredAutograd(torch.autograd.Function): """ This class is the entry point to pytorch auto grad engine. """ @staticmethod def forward( ctx, formula, aliases, backend, dtype, device_id_request, ranges, optional_flags, rec_multVar_highdim, nx, ny, out, *args ): # N.B. when rec_multVar_highdim option is set, it means that formula is of the form "sum(F*b)", where b is a variable # with large dimension. In this case we set option multVar_highdim to allow for the use of the special "final chunk" computation # mode. However, this may not be also true for the gradients of the same formula. In fact only the gradient # with respect to variable b will have the same form. Hence, we save optional_flags current status into ctx, # before adding the multVar_highdim option. ctx.optional_flags = optional_flags.copy() if rec_multVar_highdim: optional_flags["multVar_highdim"] = 1 else: optional_flags["multVar_highdim"] = 0 tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args) # number of batch dimensions # N.B. we assume here that there is at least a cat=0 or cat=1 variable in the formula... nbatchdims = max(len(arg.shape) for arg in args) - 2 use_ranges = nbatchdims > 0 or ranges device_args = args[0].device if tagCPUGPU == 1 & tagHostDevice == 1: for i in range(1, len(args)): if args[i].device.index != device_args.index: raise ValueError( "[KeOps] Input arrays must be all located on the same device." ) if device_id_request == -1: # -1 means auto setting if device_args.index: # means args are on Gpu device_id_request = device_args.index else: device_id_request = default_device_id if tagCPUGPU == 1 else -1 else: if device_args.index: if device_args.index != device_id_request: raise ValueError( "[KeOps] Gpu device id of arrays is different from device id requested for computation." ) from pykeops.common.keops_io import keops_binder myconv = keops_binder["nvrtc" if tagCPUGPU else "cpp"]( tagCPUGPU, tag1D2D, tagHostDevice, use_ranges, device_id_request, formula, aliases, len(args), dtype, "torch", optional_flags, ).import_module() # Context variables: save everything to compute the gradient: ctx.formula = formula ctx.aliases = aliases ctx.backend = backend ctx.dtype = dtype ctx.device_id_request = device_id_request ctx.ranges = ranges ctx.rec_multVar_highdim = rec_multVar_highdim ctx.myconv = myconv ctx.nx = nx ctx.ny = ny # N.B.: KeOps C++ expects contiguous data arrays test_contig = all(arg.is_contiguous() for arg in args) if not test_contig: pyKeOps_Warning( "at least one of the input tensors is not contiguous. " + "Consider using contiguous data arrays to avoid unnecessary copies." ) args = tuple(arg.contiguous() for arg in args) # N.B.: KeOps C++ expects contiguous integer arrays as ranges if ranges: ranges = tuple(r.contiguous() for r in ranges) result = myconv.genred_pytorch( device_args, ranges, nx, ny, nbatchdims, out, *args ) # relying on the 'ctx.saved_variables' attribute is necessary if you want to be able to differentiate the output # of the backward once again. It helps pytorch to keep track of 'who is who'. 
ctx.save_for_backward(*args, result) return result @staticmethod def backward(ctx, G): formula = ctx.formula aliases = ctx.aliases backend = ctx.backend dtype = ctx.dtype ranges = ctx.ranges optional_flags = ctx.optional_flags device_id_request = ctx.device_id_request myconv = ctx.myconv nx = ctx.nx ny = ctx.ny args = ctx.saved_tensors[:-1] # Unwrap the saved variables nargs = len(args) result = ctx.saved_tensors[-1].detach() not_supported = [ "Min_ArgMin_Reduction", "Min_Reduction", "Max_ArgMax_Reduction", "Max_Reduction", "KMin_ArgKMin_Reduction", "KMin_Reduction", ] for red in not_supported: if formula.startswith(red): raise NotImplementedError( "As of today, KeOps does not support " + "backpropagation through the " + red + " reduction. " + "Adding this feature to LazyTensors is on the cards " + "for future releases... But until then, you may want " + "to consider extracting the relevant integer indices " + "with a '.argmin()', '.argmax()' or '.argKmin()' reduction " + "before using PyTorch advanced indexing to create a fully-differentiable " + "tensor containing the relevant 'minimal' values." ) # If formula takes 5 variables (numbered from 0 to 4), then the gradient # wrt. the output, G, should be given as a 6-th variable (numbered 5), # with the same dim-cat as the formula's output. eta = ( "Var(" + str(nargs) + "," + str(myconv.dimout) + "," + str(myconv.tagIJ) + ")" ) # there is also a new variable for the formula's output resvar = ( "Var(" + str(nargs + 1) + "," + str(myconv.dimout) + "," + str(myconv.tagIJ) + ")" ) # convert to contiguous: G = G.contiguous() grads = [] # list of gradients wrt. args; for (var_ind, (sig, arg_ind)) in enumerate( zip(aliases, args) ): # Run through the arguments # If the current gradient is to be discarded immediatly... if not ctx.needs_input_grad[ var_ind + 11 ]: # because of (formula, aliases, backend, dtype, device_id_request, ranges, optional_flags, rec_multVar_highdim, nx, ny, out) grads.append(None) # Don't waste time computing it. else: # Otherwise, the current gradient is really needed by the user. Adding new aliases is way too dangerous if we want to compute # second derivatives, etc. So we make explicit references to Var<ind,dim,cat> instead. # New here (Joan) : we still add the new variables to the list of "aliases" (without # giving new aliases for them) these will not be used in the C++ code, # but are useful to keep track of the actual variables used in the formula _, cat, dim, pos = get_type(sig, position_in_list=var_ind) var = "Var(" + str(pos) + "," + str(dim) + "," + str(cat) + ")" # V formula_g = ( "Grad_WithSavedForward(" + formula + ", " + var + ", " + eta + ", " + resvar + ")" ) # Grad<F,V,G,R> aliases_g = aliases + [eta, resvar] args_g = ( args + (G,) + (result,) ) # Don't forget the gradient to backprop ! # N.B.: if I understand PyTorch's doc, we should redefine this function every time we use it? genconv = GenredAutograd.apply # For a reduction of the type sum(F*b), with b a variable, and if we require the gradient # with respect to b, the gradient will be of same type sum(F*eta). So we set again rec_multVar option # in this case. if pos == ctx.rec_multVar_highdim: rec_multVar_highdim = ( nargs # nargs is the position of variable eta. ) else: rec_multVar_highdim = None if ( cat == 2 ): # we're referring to a parameter, so we'll have to sum both wrt 'i' and 'j' # WARNING !! 
: here we rely on the implementation of DiffT in files in folder keopscore/core/formulas/reductions # if tagI==cat of V is 2, then reduction is done wrt j, so we need to further sum output wrt i grad = genconv( formula_g, aliases_g, backend, dtype, device_id_request, ranges, optional_flags, rec_multVar_highdim, nx, ny, None, *args_g ) # Then, sum 'grad' wrt 'i' : # I think that '.sum''s backward introduces non-contiguous arrays, # and is thus non-compatible with GenredAutograd: grad = grad.sum(0) # We replace it with a 'handmade hack' : # grad = torch.ones(1, grad.shape[0]).type_as(grad.data) @ grad # grad = grad.view(-1) grad = (1.0 * grad).sum(-2) dims_to_collapse = tuple( i for (i, (x, y)) in enumerate( zip(arg_ind.shape[:-1], grad.shape[:-1]) ) if x < y ) else: grad = genconv( formula_g, aliases_g, backend, dtype, device_id_request, ranges, optional_flags, rec_multVar_highdim, nx, ny, None, *args_g ) # N.B.: 'grad' is always a full [A, .., B, M, D] or [A, .., B, N, D] or [A, .., B, D] tensor, # whereas 'arg_ind' may have some broadcasted batched dimensions. # Before returning our gradient, we must collapse 'grad' with a .sum() operation, # which is the adjoint of the good old "repmat" that could have been used # to emulate the batch broadcasting. dims_to_collapse = tuple( i for (i, (x, y)) in enumerate( zip(arg_ind.shape[:-2], grad.shape[:-2]) ) if x < y ) if dims_to_collapse != (): grad = (1.0 * grad).sum(dims_to_collapse, keepdim=True) grad = grad.reshape( arg_ind.shape ) # The gradient should have the same shape as the input! grads.append(grad) # Grads wrt. formula, aliases, backend, dtype, device_id_request, ranges, optional_flags, rec_multVar_highdim, nx, ny, out, *args return ( None, None, None, None, None, None, None, None, None, None, None, *grads, ) class Genred: r""" Creates a new generic operation. This is KeOps' main function, whose usage is documented in the :doc:`user-guide <../../Genred>`, the :doc:`gallery of examples <../../../_auto_examples/index>` and the :doc:`high-level tutorials <../../../_auto_tutorials/index>`. Taking as input a handful of strings and integers that specify a custom Map-Reduce operation, it returns a C++ wrapper that can be called just like any other PyTorch function. Note: :func:`Genred` is fully compatible with PyTorch's :mod:`autograd` engine: You can **backprop** through the KeOps :meth:`__call__` just as if it was a vanilla PyTorch operation (except for Min or Max reduction types, see :ref:`reductions <part.reduction>`) Example: >>> my_conv = Genred('Exp(-SqNorm2(x - y))', # formula ... ['x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)'], # 2nd input: dim-3 vector per column ... reduction_op='Sum', # we also support LogSumExp, Min, etc. ... axis=1) # reduce along the lines of the kernel matrix >>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines >>> x = torch.randn(1000000, 3, requires_grad=True).cuda() >>> y = torch.randn(2000000, 3).cuda() >>> a = my_conv(x, y) # a_i = sum_j exp(-|x_i-y_j|^2) >>> print(a.shape) torch.Size([1000000, 1]) >>> [g_x] = torch.autograd.grad((a ** 2).sum(), [x]) # KeOps supports autograd! >>> print(g_x.shape) torch.Size([1000000, 3]) """ def __init__( self, formula, aliases, reduction_op="Sum", axis=0, dtype=None, opt_arg=None, formula2=None, cuda_type=None, dtype_acc="auto", use_double_acc=False, sum_scheme="auto", enable_chunks=True, rec_multVar_highdim=False, ): r""" Instantiate a new generic operation. 
Note: :class:`Genred` relies on C++ or CUDA kernels that are compiled on-the-fly, and stored in a :ref:`cache directory <part.cache>` as shared libraries (".so" files) for later use. Args: formula (string): The scalar- or vector-valued expression that should be computed and reduced. The correct syntax is described in the :doc:`documentation <../../Genred>`, using appropriate :doc:`mathematical operations <../../../api/math-operations>`. aliases (list of strings): A list of identifiers of the form ``"AL = TYPE(DIM)"`` that specify the categories and dimensions of the input variables. Here: - ``AL`` is an alphanumerical alias, used in the **formula**. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0. - ``Vj``: indexation by :math:`j` along axis 1. - ``Pm``: no indexation, the input tensor is a *vector* and not a 2d array. - ``DIM`` is an integer, the dimension of the current variable. As described below, :meth:`__call__` will expect as input Tensors whose shape are compatible with **aliases**. Keyword Args: reduction_op (string, default = ``"Sum"``): Specifies the reduction operation that is applied to reduce the values of ``formula(x_i, y_j, ...)`` along axis 0 or axis 1. The supported values are one of :ref:`part.reduction`. axis (int, default = 0): Specifies the dimension of the "kernel matrix" that is reduced by our routine. The supported values are: - **axis** = 0: reduction with respect to :math:`i`, outputs a ``Vj`` or ":math:`j`" variable. - **axis** = 1: reduction with respect to :math:`j`, outputs a ``Vi`` or ":math:`i`" variable. opt_arg (int, default = None): If **reduction_op** is in ``["KMin", "ArgKMin", "KMin_ArgKMin"]``, this argument allows you to specify the number ``K`` of neighbors to consider. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64".. use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. Default value "auto" will set this option to "block_red". Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): for Gpu mode only, enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. rec_multVar_highdim (bool, default False): for Gpu mode only, enable special "final chunked" computation mode for accelerating reductions with formulas involving large dimension variables. 
Beware ! This will only work if the formula has the very special form that allows such computation mode. """ if dtype: pyKeOps_Warning( "keyword argument dtype in Genred is deprecated ; argument is ignored." ) if cuda_type: pyKeOps_Warning( "keyword argument cuda_type in Genred is deprecated ; argument is ignored." ) self.reduction_op = reduction_op reduction_op_internal, formula2 = preprocess(reduction_op, formula2) self.optional_flags = get_optional_flags( reduction_op_internal, dtype_acc, use_double_acc, sum_scheme, enable_chunks, ) str_opt_arg = "," + str(opt_arg) if opt_arg else "" str_formula2 = "," + formula2 if formula2 else "" self.formula = ( reduction_op_internal + "_Reduction(" + formula + str_opt_arg + "," + str(axis2cat(axis)) + str_formula2 + ")" ) self.aliases = complete_aliases( self.formula, list(aliases) ) # just in case the user provided a tuple self.axis = axis self.opt_arg = opt_arg self.rec_multVar_highdim = rec_multVar_highdim def __call__(self, *args, backend="auto", device_id=-1, ranges=None, out=None): r""" To apply the routine on arbitrary torch Tensors. .. warning: Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}` for :math:`i\in[0,M)`), KeOps expects inputs to be formatted as 2d Tensors of size ``(M,dim)``. In practice, ``a.view(-1,1)`` should be used to turn a vector of weights into a *list of scalar values*. Args: *args (2d Tensors (variables ``Vi(..)``, ``Vj(..)``) and 1d Tensors (parameters ``Pm(..)``)): The input numerical arrays, which should all have the same ``dtype``, be **contiguous** and be stored on the **same device**. KeOps expects one array per alias, with the following compatibility rules: - All ``Vi(Dim_k)`` variables are encoded as **2d-tensors** with ``Dim_k`` columns and the same number of lines :math:`M`. - All ``Vj(Dim_k)`` variables are encoded as **2d-tensors** with ``Dim_k`` columns and the same number of lines :math:`N`. - All ``Pm(Dim_k)`` variables are encoded as **1d-tensors** (vectors) of size ``Dim_k``. Keyword Args: backend (string): Specifies the map-reduce scheme. The supported values are: - ``"auto"`` (default): let KeOps decide which backend is best suited to your data, based on the tensors' shapes. ``"GPU_1D"`` will be chosen in most cases. - ``"CPU"``: use a simple C++ ``for`` loop on a single CPU core. - ``"GPU_1D"``: use a `simple multithreading scheme <https://github.com/getkeops/keops/blob/main/keops/core/GpuConv1D.cu>`_ on the GPU - basically, one thread per value of the output index. - ``"GPU_2D"``: use a more sophisticated `2D parallelization scheme <https://github.com/getkeops/keops/blob/main/keops/core/GpuConv2D.cu>`_ on the GPU. - ``"GPU"``: let KeOps decide which one of the ``"GPU_1D"`` or the ``"GPU_2D"`` scheme will run faster on the given input. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of IntTensors, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` with *Mc clusters along axis 0* and *Nc clusters along axis 1*. If None (default), we simply loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. 
**The first three ranges** will be used if **axis** = 1 (reduction along the axis of ":math:`j` variables"), and to compute gradients with respect to ``Vi(..)`` variables: - ``ranges_i``, (Mc,2) IntTensor - slice indices :math:`[\operatorname{start}^I_k,\operatorname{end}^I_k)` in :math:`[0,M]` that specify our Mc blocks along the axis 0 of ":math:`i` variables". - ``slices_i``, (Mc,) IntTensor - consecutive slice indices :math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{M_c}]` that specify Mc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_j``, with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`. **The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that ``slices_i[-1] == len(redrange_j)``. - ``redranges_j``, (Mcc,2) IntTensor - slice indices :math:`[\operatorname{start}^J_\ell,\operatorname{end}^J_\ell)` in :math:`[0,N]` that specify reduction ranges along the axis 1 of ":math:`j` variables". If **axis** = 1, these integer arrays allow us to say that ``for k in range(Mc)``, the output values for indices ``i in range( ranges_i[k,0], ranges_i[k,1] )`` should be computed using a Map-Reduce scheme over indices ``j in Union( range( redranges_j[l, 0], redranges_j[l, 1] ))`` for ``l in range( slices_i[k-1], slices_i[k] )``. **Likewise, the last three ranges** will be used if **axis** = 0 (reduction along the axis of ":math:`i` variables"), and to compute gradients with respect to ``Vj(..)`` variables: - ``ranges_j``, (Nc,2) IntTensor - slice indices :math:`[\operatorname{start}^J_k,\operatorname{end}^J_k)` in :math:`[0,N]` that specify our Nc blocks along the axis 1 of ":math:`j` variables". - ``slices_j``, (Nc,) IntTensor - consecutive slice indices :math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{N_c}]` that specify Nc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_i``, with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`. **The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that ``slices_j[-1] == len(redrange_i)``. - ``redranges_i``, (Ncc,2) IntTensor - slice indices :math:`[\operatorname{start}^I_\ell,\operatorname{end}^I_\ell)` in :math:`[0,M]` that specify reduction ranges along the axis 0 of ":math:`i` variables". If **axis** = 0, these integer arrays allow us to say that ``for k in range(Nc)``, the output values for indices ``j in range( ranges_j[k,0], ranges_j[k,1] )`` should be computed using a Map-Reduce scheme over indices ``i in Union( range( redranges_i[l, 0], redranges_i[l, 1] ))`` for ``l in range( slices_j[k-1], slices_j[k] )``. out (2d Tensor, None by default): The output numerical array, for in-place computation. If provided, the output array should all have the same ``dtype``, be **contiguous** and be stored on the **same device** as the arguments. Moreover it should have the correct shape for the output. Returns: (M,D) or (N,D) Tensor: The output of the reduction, stored on the same device as the input Tensors. The output of a Genred call is always a **2d-tensor** with :math:`M` or :math:`N` lines (if **axis** = 1 or **axis** = 0, respectively) and a number of columns that is inferred from the **formula**. 
""" dtype = args[0].dtype.__str__().split(".")[1] nx, ny = get_sizes(self.aliases, *args) nout, nred = (nx, ny) if self.axis == 1 else (ny, nx) if "Arg" in self.reduction_op: # when using Arg type reductions, # if nred is greater than 16 millions and dtype=float32, the result is not reliable # because we encode indices as floats, so we raise an exception ; # same with float16 type and nred>2048 if nred > 1.6e7 and dtype in ("float32", "float"): raise ValueError( "size of input array is too large for Arg type reduction with single precision. Use double precision." ) elif nred > 2048 and dtype in ("float16", "half"): raise ValueError( "size of input array is too large for Arg type reduction with float16 dtype.." ) out = GenredAutograd.apply( self.formula, self.aliases, backend, dtype, device_id, ranges, self.optional_flags, self.rec_multVar_highdim, nx, ny, out, *args ) return postprocess(out, "torch", self.reduction_op, nout, self.opt_arg, dtype)
keops-main
pykeops/pykeops/torch/generic/generic_red.py
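A CPU-sized variant of the docstring example above (no .cuda() calls), with a dense PyTorch check and a gradient; the array sizes are arbitrary and pykeops is assumed to be installed.

import torch
from pykeops.torch import Genred

my_conv = Genred("Exp(-SqNorm2(x - y))",
                 ["x = Vi(3)", "y = Vj(3)"],
                 reduction_op="Sum", axis=1)

x = torch.randn(500, 3, requires_grad=True)
y = torch.randn(800, 3)

a = my_conv(x, y)                                   # a_i = sum_j exp(-|x_i - y_j|^2), shape (500, 1)
a_dense = (-((x[:, None] - y[None, :]) ** 2).sum(-1)).exp().sum(1, keepdim=True)
print((a - a_dense).abs().max().item())             # small float32 round-off

[g_x] = torch.autograd.grad(a.sum(), [x])           # autograd works through the KeOps kernel
print(g_x.shape)                                    # torch.Size([500, 3])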
keops-main
pykeops/pykeops/torch/generic/__init__.py
from pykeops.common.parse_type import get_type from pykeops.common.utils import cat2axis from pykeops.torch import Genred def generic_sum(formula, output, *aliases, **kwargs): r"""Alias for :class:`torch.Genred <pykeops.torch.Genred>` with a "Sum" reduction. Args: formula (string): Symbolic KeOps expression, as in :class:`torch.Genred <pykeops.torch.Genred>`. output (string): An identifier of the form ``"AL = TYPE(DIM)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. - ``DIM`` is an integer, the dimension of the output variable; it should be compatible with **formula**. *aliases (strings): List of identifiers, as in :class:`torch.Genred <pykeops.torch.Genred>`. Keyword Args: Returns: A generic reduction that can be called on arbitrary Torch tensors, as documented in :class:`torch.Genred <pykeops.torch.Genred>`. Example: >>> my_conv = generic_sum( # Custom Kernel Density Estimator ... 'Exp(-SqNorm2(x - y))', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)') # 2nd input: dim-3 vector per line >>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines >>> x = torch.randn(1000000, 3, requires_grad=True).cuda() >>> y = torch.randn(2000000, 3).cuda() >>> a = my_conv(x, y) # a_i = sum_j exp(-|x_i-y_j|^2) >>> print(a.shape) torch.Size([1000000, 1]) """ _, cat, _, _ = get_type(output) axis = cat2axis(cat) return Genred(formula, aliases, reduction_op="Sum", axis=axis, **kwargs) def generic_logsumexp(formula, output, *aliases, **kwargs): r"""Alias for :class:`torch.Genred <pykeops.torch.Genred>` with a "LogSumExp" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`torch.Genred <pykeops.torch.Genred>`. output (string): An identifier of the form ``"AL = TYPE(1)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. *aliases (strings): List of identifiers, as in :class:`torch.Genred <pykeops.torch.Genred>`. Keyword Args: Returns: A generic reduction that can be called on arbitrary Torch tensors, as documented in :class:`torch.Genred <pykeops.torch.Genred>`. Example: Log-likelihood of a Gaussian Mixture Model, .. math:: a_i~=~f(x_i)~&=~ \log \sum_{j=1}^{N} \exp(-\gamma\cdot\|x_i-y_j\|^2)\cdot b_j \\\\ ~&=~ \log \sum_{j=1}^{N} \exp\big(-\gamma\cdot\|x_i-y_j\|^2 \,+\, \log(b_j) \big). >>> log_likelihood = generic_logsumexp( ... '(-(g * SqNorm2(x - y))) + b', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)', # 2nd input: dim-3 vector per line ... 'g = Pm(1)', # 3rd input: vector of size 1 ... 'b = Vj(1)') # 4th input: 1 scalar per line >>> x = torch.randn(1000000, 3, requires_grad=True).cuda() >>> y = torch.randn(2000000, 3).cuda() >>> g = torch.Tensor([.5]).cuda() # Parameter of our GMM >>> b = torch.rand(2000000, 1).cuda() # Positive weights... 
>>> b = b / b.sum() # Normalized to get a probability measure >>> a = log_likelihood(x, y, g, b.log()) # a_i = log sum_j exp(-g*|x_i-y_j|^2) * b_j >>> print(a.shape) torch.Size([1000000, 1]) """ _, cat, _, _ = get_type(output) axis = cat2axis(cat) return Genred(formula, aliases, reduction_op="LogSumExp", axis=axis, **kwargs) def generic_argkmin(formula, output, *aliases, **kwargs): r"""Alias for :class:`torch.Genred <pykeops.torch.Genred>` with an "ArgKMin" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`torch.Genred <pykeops.torch.Genred>`. output (string): An identifier of the form ``"AL = TYPE(K)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. - ``K`` is an integer, the number of values to extract. *aliases (strings): List of identifiers, as in :class:`torch.Genred <pykeops.torch.Genred>`. Keyword Args: Returns: A generic reduction that can be called on arbitrary Torch tensors, as documented in :class:`torch.Genred <pykeops.torch.Genred>`. Example: Bruteforce K-nearest neighbors search in dimension 100: >>> knn = generic_argkmin( ... 'SqDist(x, y)', # Formula ... 'a = Vi(3)', # Output: 3 scalars per line ... 'x = Vi(100)', # 1st input: dim-100 vector per line ... 'y = Vj(100)') # 2nd input: dim-100 vector per line >>> x = torch.randn(5, 100) >>> y = torch.randn(20000, 100) >>> a = knn(x, y) >>> print(a) tensor([[ 9054., 11653., 11614.], [13466., 11903., 14180.], [14164., 8809., 3799.], [ 2092., 3323., 18479.], [14433., 11315., 11841.]]) >>> print( (x - y[ a[:,0].long() ]).norm(dim=1) ) # Distance to the nearest neighbor tensor([10.7933, 10.3235, 10.1218, 11.4919, 10.5100]) >>> print( (x - y[ a[:,1].long() ]).norm(dim=1) ) # Distance to the second neighbor tensor([11.3702, 10.6550, 10.7646, 11.5676, 11.1356]) >>> print( (x - y[ a[:,2].long() ]).norm(dim=1) ) # Distance to the third neighbor tensor([11.3820, 10.6725, 10.8510, 11.6071, 11.1968]) """ _, cat, k, _ = get_type(output) axis = cat2axis(cat) return Genred( formula, aliases, reduction_op="ArgKMin", axis=axis, opt_arg=k, **kwargs ) def generic_argmin(formula, output, *aliases, **kwargs): r"""Alias for :class:`torch.Genred <pykeops.torch.Genred>` with an "ArgMin" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`torch.Genred <pykeops.torch.Genred>`. output (string): An identifier of the form ``"AL = TYPE(1)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. *aliases (strings): List of identifiers, as in :class:`torch.Genred <pykeops.torch.Genred>`. Keyword Args: Returns: A generic reduction that can be called on arbitrary Torch tensors, as documented in :class:`torch.Genred <pykeops.torch.Genred>`. Example: Bruteforce nearest neighbor search in dimension 100: >>> nearest_neighbor = generic_argmin( ... 'SqDist(x, y)', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(100)', # 1st input: dim-100 vector per line ... 
'y = Vj(100)') # 2nd input: dim-100 vector per line >>> x = torch.randn(5, 100) >>> y = torch.randn(20000, 100) >>> a = nearest_neighbor(x, y) >>> print(a) tensor([[ 8761.], [ 2836.], [ 906.], [16130.], [ 3158.]]) >>> dists = (x - y[ a.view(-1).long() ] ).norm(dim=1) # Distance to the nearest neighbor >>> print(dists) tensor([10.5926, 10.9132, 9.9694, 10.1396, 10.1955]) """ _, cat, _, _ = get_type(output) axis = cat2axis(cat) return Genred(formula, aliases, reduction_op="ArgMin", axis=axis, **kwargs)
keops-main
pykeops/pykeops/torch/generic/generic_ops.py
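As the code above shows, these generic_* helpers are thin wrappers that read the output alias and forward everything to Genred; the sketch below (arbitrary formula and sizes) makes the equivalence explicit for generic_sum.

import torch
from pykeops.torch import Genred, generic_sum

# The output alias 'a = Vi(1)' means one value per "i" line, i.e. a reduction over j (axis=1).
conv_a = generic_sum("Exp(-SqNorm2(x - y))", "a = Vi(1)", "x = Vi(3)", "y = Vj(3)")
conv_b = Genred("Exp(-SqNorm2(x - y))", ["x = Vi(3)", "y = Vj(3)"],
                reduction_op="Sum", axis=1)

x, y = torch.randn(100, 3), torch.randn(200, 3)
print(torch.allclose(conv_a(x, y), conv_b(x, y)))   # the two wrappers agree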
##########################################################
# Import pyKeOps routines

from .generic.generic_red import Genred
from .operations import KernelSolve

from .generic.generic_ops import (
    generic_sum,
    generic_logsumexp,
    generic_argmin,
    generic_argkmin,
)

from .lazytensor.LazyTensor import LazyTensor, ComplexLazyTensor, Vi, Vj, Pm

__all__ = sorted(
    [
        "Genred",
        "generic_sum",
        "generic_logsumexp",
        "generic_argmin",
        "generic_argkmin",
        "KernelSolve",
        "LazyTensor",
        "Vi",
        "Vj",
        "Pm",
    ]
)
keops-main
pykeops/pykeops/numpy/__init__.py
import numpy as np from pykeops.common.get_options import get_tag_backend from pykeops.common.keops_io import keops_binder from pykeops.common.operations import ConjugateGradientSolver from pykeops.common.parse_type import get_sizes, complete_aliases, get_optional_flags from pykeops.common.utils import axis2cat from pykeops import default_device_id from pykeops.common.utils import pyKeOps_Warning class KernelSolve: r""" Creates a new conjugate gradient solver. Supporting the same :ref:`generic syntax <part.generic_formulas>` as :class:`numpy.Genred <pykeops.numpy.Genred>`, this module allows you to solve generic optimization problems of the form: .. math:: & & a^{\star} & =\operatorname*{argmin}_a \tfrac 1 2 \langle a,( \alpha \operatorname{Id}+K_{xx}) a\rangle - \langle a,b \rangle, \\\\ &\text{i.e.}\quad & a^{\star} & = (\alpha \operatorname{Id} + K_{xx})^{-1} b, where :math:`K_{xx}` is a **symmetric**, **positive** definite **linear** operator and :math:`\alpha` is a **nonnegative regularization** parameter. Example: >>> formula = "Exp(-Norm2(x - y)) * a" # Exponential kernel >>> aliases = ["x = Vi(3)", # 1st input: target points, one dim-3 vector per line ... "y = Vj(3)", # 2nd input: source points, one dim-3 vector per column ... "a = Vj(2)"] # 3rd input: source signal, one dim-2 vector per column >>> K = Genred(formula, aliases, axis = 1) # Reduce formula along the lines of the kernel matrix >>> K_inv = KernelSolve(formula, aliases, "a", # The formula above is linear wrt. 'a' ... axis = 1) >>> # Generate some random data: >>> x = np.random.randn(10000, 3) # Sampling locations >>> b = np.random.randn(10000, 2) # Random observed signal >>> a = K_inv(x, x, b, alpha = .1) # Linear solve: a_i = (.1*Id + K(x,x)) \ b >>> print(a.shape) (10000, 2) >>> # Mean squared error: >>> print( ( np.sum( np.sqrt( ( .1 * a + K(x,x,a) - b)**2 ) ) / len(x) ).item() ) 1.5619025770417854e-06 """ def __init__( self, formula, aliases, varinvalias, axis=0, dtype=None, opt_arg=None, dtype_acc="auto", use_double_acc=False, sum_scheme="auto", enable_chunks=True, rec_multVar_highdim=None, ): r""" Instantiate a new KernelSolve operation. Note: :class:`KernelSolve` relies on C++ or CUDA kernels that are compiled on-the-fly and stored in a :ref:`cache directory <part.cache>` as shared libraries (".so" files) for later use. Args: formula (string): The scalar- or vector-valued expression that should be computed and reduced. The correct syntax is described in the :doc:`documentation <../../Genred>`, using appropriate :doc:`mathematical operations <../../../api/math-operations>`. aliases (list of strings): A list of identifiers of the form ``"AL = TYPE(DIM)"`` that specify the categories and dimensions of the input variables. Here: - ``AL`` is an alphanumerical alias, used in the **formula**. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0. - ``Vj``: indexation by :math:`j` along axis 1. - ``Pm``: no indexation, the input tensor is a *vector* and not a 2d array. - ``DIM`` is an integer, the dimension of the current variable. As described below, :meth:`__call__` will expect input arrays whose shape are compatible with **aliases**. varinvalias (string): The alphanumerical **alias** of the variable with respect to which we shall perform our conjugate gradient descent. **formula** is supposed to be linear with respect to **varinvalias**, but may be more sophisticated than a mere ``"K(x,y) * {varinvalias}"``. 
Keyword Args: axis (int, default = 0): Specifies the dimension of the kernel matrix :math:`K_{x_ix_j}` that is reduced by our routine. The supported values are: - **axis** = 0: reduction with respect to :math:`i`, outputs a ``Vj`` or ":math:`j`" variable. - **axis** = 1: reduction with respect to :math:`j`, outputs a ``Vi`` or ":math:`i`" variable. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64".. use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. Default value "auto" will set this option to "block_red". Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. """ if dtype: pyKeOps_Warning( "keyword argument dtype in KernelSolve is deprecated ; argument is ignored." ) reduction_op = "Sum" if opt_arg: self.formula = ( reduction_op + "_Reduction(" + formula + "," + str(opt_arg) + "," + str(axis2cat(axis)) + ")" ) else: self.formula = ( reduction_op + "_Reduction(" + formula + "," + str(axis2cat(axis)) + ")" ) optional_flags = get_optional_flags( reduction_op, dtype_acc, use_double_acc, sum_scheme, enable_chunks ) if rec_multVar_highdim: optional_flags["multVar_highdim"] = 1 else: optional_flags["multVar_highdim"] = 0 self.aliases = complete_aliases(formula, aliases) self.varinvalias = varinvalias if varinvalias[:4] == "Var(": # varinv is given directly as Var(*,*,*) so we just have to read the index varinvpos = int(varinvalias[4 : varinvalias.find(",")]) else: # we need to recover index from alias tmp = self.aliases.copy() for (i, s) in enumerate(tmp): tmp[i] = s[: s.find("=")].strip() varinvpos = tmp.index(varinvalias) self.varinvpos = varinvpos self.axis = axis self.reduction_op = reduction_op self.optional_flags = optional_flags def __call__( self, *args, backend="auto", device_id=-1, alpha=1e-10, eps=1e-6, ranges=None ): r""" To apply the routine on arbitrary NumPy arrays. Warning: Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}` for :math:`i\in[0,M)`), KeOps expects inputs to be formatted as 2d arrays of size ``(M,dim)``. In practice, ``a.view(-1,1)`` should be used to turn a vector of weights into a *list of scalar values*. 
Args: *args (2d arrays (variables ``Vi(..)``, ``Vj(..)``) and 1d arrays (parameters ``Pm(..)``)): The input numerical arrays, which should all have the same ``dtype``, be **contiguous** and be stored on the **same device**. KeOps expects one array per alias, with the following compatibility rules: - All ``Vi(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`M`. - All ``Vj(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`N`. - All ``Pm(Dim_k)`` variables are encoded as **1d-arrays** (vectors) of size ``Dim_k``. Keyword Args: alpha (float, default = 1e-10): Non-negative **ridge regularization** parameter, added to the diagonal of the Kernel matrix :math:`K_{xx}`. backend (string): Specifies the map-reduce scheme, as detailed in the documentation of the :class:`numpy.Genred <pykeops.numpy.Genred>` module. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of IntTensors, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` with *Mc clusters along axis 0* and *Nc clusters along axis 1*, as detailed in the documentation of the :class:`numpy.Genred <pykeops.numpy.Genred>` module. If **None** (default), we simply use a **dense Kernel matrix** as we loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. Returns: (M,D) or (N,D) array: The solution of the optimization problem, which is always a **2d-array** with :math:`M` or :math:`N` lines (if **axis** = 1 or **axis** = 0, respectively) and a number of columns that is inferred from the **formula**. """ # Get tags tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args) # number of batch dimensions # N.B. we assume here that there is at least a cat=0 or cat=1 variable in the formula... nbatchdims = max(len(arg.shape) for arg in args) - 2 use_ranges = nbatchdims > 0 or ranges dtype = args[0].dtype.__str__() if device_id == -1: device_id = default_device_id if tagCPUGPU == 1 else -1 self.myconv = keops_binder["nvrtc" if tagCPUGPU else "cpp"]( tagCPUGPU, tag1D2D, tagHostDevice, use_ranges, device_id, self.formula, self.aliases, len(args), dtype, "numpy", self.optional_flags, ).import_module() varinv = args[self.varinvpos] def linop(var): newargs = args[: self.varinvpos] + (var,) + args[self.varinvpos + 1 :] nx, ny = get_sizes(self.aliases, *newargs) res = self.myconv.genred_numpy( -1, ranges, nx, ny, nbatchdims, None, *newargs ) if alpha: res += alpha * var return res return ConjugateGradientSolver("numpy", linop, varinv, eps=eps)
keops-main
pykeops/pykeops/numpy/operations.py
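A smaller, CPU-sized variant of the KernelSolve docstring example, using a Gaussian kernel and checking the residual of the linear system explicitly; the kernel, the sizes and the alpha=0.1 ridge are assumptions made for illustration.

import numpy as np
from pykeops.numpy import Genred, KernelSolve

formula = "Exp(-SqDist(x, y)) * a"                    # Gaussian kernel, linear in 'a'
aliases = ["x = Vi(3)", "y = Vj(3)", "a = Vj(1)"]

K = Genred(formula, aliases, axis=1)                  # K(x, y, a)_i = sum_j k(x_i, y_j) a_j
K_inv = KernelSolve(formula, aliases, "a", axis=1)    # solves (alpha*Id + K_xx) a = b

x = np.random.randn(1000, 3)
b = np.random.randn(1000, 1)

a = K_inv(x, x, b, alpha=0.1)                         # conjugate-gradient solve, shape (1000, 1)
residual = 0.1 * a + K(x, x, a) - b
print(np.abs(residual).max())                         # small, of the order of the solver tolerance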
import numpy as np from pykeops.numpy import Genred, KernelSolve from pykeops.numpy.cluster import swap_axes as np_swap_axes import pykeops.config class numpytools: norm = np.linalg.norm arraysum = np.sum exp = np.exp log = np.log Genred = Genred KernelSolve = KernelSolve swap_axes = np_swap_axes arraytype = np.ndarray float_types = [float, np.float16, np.float32, np.float64] @staticmethod def is_tensor(x): return isinstance(x, np.ndarray) @staticmethod def copy(x): return np.copy(x) @staticmethod def eq(x, y): return np.equal(x, y) @staticmethod def transpose(x): return x.T @staticmethod def permute(x, *args): return x.transpose(*args) @staticmethod def contiguous(x): return np.ascontiguousarray(x) @staticmethod def numpy(x): return x @staticmethod def tile(*args): return np.tile(*args) @staticmethod def solve(*args): return np.linalg.solve(*args) @staticmethod def size(x): return x.size @staticmethod def view(x, s): return np.reshape(x, s) @staticmethod def long(x): return x.astype("int64") @staticmethod def dtype(x): return x.dtype @staticmethod def detect_complex(x): if type(x) == list: return any(type(v) == complex for v in x) elif type(x) == np.ndarray: return np.iscomplexobj(x) else: return type(x) == complex @staticmethod def view_as_complex(x): if x.dtype == "float32": return x.view("complex64") elif x.dtype == "float64": return x.view("complex128") @staticmethod def view_as_real(x): if x.dtype == "complex64": return x.view("float32") elif x.dtype == "complex128": return x.view("float64") @staticmethod def dtypename(dtype): return dtype.name @staticmethod def rand(m, n, dtype): return np.random.rand(m, n).astype(dtype) @staticmethod def randn(m, n, dtype): return np.random.randn(m, n).astype(dtype) @staticmethod def zeros(shape, dtype, device=None, requires_grad=None): return np.zeros(shape).astype(dtype) @staticmethod def empty(shape, dtype, device=None, requires_grad=None): return np.empty(shape).astype(dtype) @staticmethod def eye(n, dtype): return np.eye(n).astype(dtype) @staticmethod def array(x, dtype, device=None): return np.array(x).astype(dtype) @staticmethod def get_pointer(x): return x.__array_interface__["data"][0] @staticmethod def device(x): return "cpu" @staticmethod def device_type_index(x): return "cpu", None @staticmethod def device_dict(x): return dict(cat="cpu") def squared_distances(x, y): x_norm = (x**2).sum(1).reshape(-1, 1) y_norm = (y**2).sum(1).reshape(1, -1) dist = x_norm + y_norm - 2.0 * np.matmul(x, y.T) return dist def differences(x, y): return x.T[:, :, np.newaxis] - y.T[:, np.newaxis, :] def np_kernel_sphere(nalpha, nbeta, s, kernel): prs = nalpha @ nbeta.T if kernel == "binet": return prs**2 elif kernel == "linear": return prs elif kernel == "gaussian_unoriented": return np.exp((-2.0 + 2.0 * prs * prs) / (s * s)) elif kernel == "gaussian_oriented": return np.exp((-2.0 + 2.0 * prs) / (s * s)) def np_kernel(x, y, s, kernel): sq = squared_distances(x, y) if kernel == "gaussian": return np.exp(-sq / (s * s)) elif kernel == "laplacian": return np.exp(-np.sqrt(sq) / s) elif kernel == "cauchy": return 1.0 / (1 + sq / (s * s)) elif kernel == "inverse_multiquadric": return np.sqrt(1.0 / (1 + sq / (s * s))) def log_np_kernel(x, y, s, kernel): sq = squared_distances(x, y) if kernel == "gaussian": return -sq / (s * s) elif kernel == "laplacian": return -np.sqrt(sq) / s elif kernel == "cauchy": return -np.log(1 + sq / (s * s)) elif kernel == "inverse_multiquadric": return -0.5 * np.log(1.0 + sq / (s * s)) def grad_np_kernel(x, y, s, kernel): sq = 
squared_distances(x, y) if kernel == "gaussian": return -np.exp(-sq / (s * s)) / (s * s) elif kernel == "laplacian": t = -np.sqrt(sq / (s * s)) return np.exp(t) / (2 * s * s * t) elif kernel == "cauchy": return -1.0 / (s * (sq / (s * s) + 1)) ** 2 elif kernel == "inverse_multiquadric": return -0.5 / ((s**2) * ((sq / (s * s) + 1) ** 1.5)) def chain_rules(q, ax, by, Aa, p): res = np.zeros(ax.shape).astype("float32") for i in range(ax.shape[1]): # Computation of 2*|x_i -x_j|*exp(-|x_i -x_j|^2/(lam^2))/(lam^2) ximyj = np.tile(ax[:, i], [by.shape[0], 1]).T - np.tile( by[:, i], [ax.shape[0], 1] ) res[:, i] = np.sum(q * ((2 * ximyj * Aa) @ p), axis=1) return res def log_sum_exp(mat, axis=0): """ Computes the log-sum-exp of a matrix with a numerically stable scheme, in the user-defined summation dimension: exp is never applied to a number >= 0, and in each summation row, there is at least one "exp(0)" to stabilize the sum. For instance, if dim = 1 and mat is a 2d array, we output log( sum_j exp( mat[i,j] )) by factoring out the row-wise maximas. """ max_rc = mat.max(axis=axis) return max_rc + np.log( np.sum(np.exp(mat - np.expand_dims(max_rc, axis=axis)), axis=axis) ) def WarmUpGpu(): # dummy first calls for accurate timing in case of GPU use from pykeops.common.utils import pyKeOps_Message pyKeOps_Message("Warming up the Gpu (numpy bindings) !!!") if pykeops.config.gpu_available: formula = "Exp(-oos2*SqDist(x,y))*b" aliases = [ "x = Vi(1)", # First arg : i-variable, of size 1 "y = Vj(1)", # Second arg : j-variable, of size 1 "b = Vj(1)", # Third arg : j-variable, of size 1 "oos2 = Pm(1)", ] # Fourth arg : scalar parameter my_routine = Genred( formula, aliases, reduction_op="Sum", axis=1, dtype="float64" ) dum = np.random.rand(10, 1) dum2 = np.random.rand(10, 1) my_routine(dum, dum, dum2, np.array([1.0])) my_routine(dum, dum, dum2, np.array([1.0]))
keops-main
pykeops/pykeops/numpy/utils.py
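The helpers above are the plain-NumPy reference implementations that pykeops compares its compiled kernels against. A minimal self-contained sketch (toy sizes chosen arbitrarily) showing how the logic of squared_distances, np_kernel and log_sum_exp fits together for a Gaussian kernel, without importing pykeops:

import numpy as np

# Dense Gaussian kernel row-sums, mirroring squared_distances / np_kernel / log_sum_exp above.
x = np.random.rand(5, 3)
y = np.random.rand(7, 3)
s = 0.5

sq = (x**2).sum(1).reshape(-1, 1) + (y**2).sum(1).reshape(1, -1) - 2.0 * x @ y.T
K = np.exp(-sq / (s * s))                 # same as np_kernel(x, y, s, "gaussian")
row_sums = K.sum(axis=1)                  # plain reduction over j

log_K = -sq / (s * s)                     # same as log_np_kernel(x, y, s, "gaussian")
m = log_K.max(axis=1)                     # stable log-sum-exp over j, as in log_sum_exp(mat, axis=1)
log_row_sums = m + np.log(np.exp(log_K - m[:, None]).sum(axis=1))

assert np.allclose(np.log(row_sums), log_row_sums)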
import numpy as np

from pykeops.common.utils import pyKeOps_Message

formula = "SqNorm2(x - y)"
var = ["x = Vi(3)", "y = Vj(3)"]
expected_res = np.array([63.0, 90.0])


def test_numpy_bindings():
    """
    This function tries to compile a simple KeOps formula using the numpy binder.
    """
    x = np.arange(1, 10).reshape(-1, 3).astype("float32")
    y = np.arange(3, 9).reshape(-1, 3).astype("float32")

    import pykeops.numpy as pknp

    my_conv = pknp.Genred(formula, var)
    if np.allclose(my_conv(x, y).flatten(), expected_res):
        pyKeOps_Message("pyKeOps with numpy bindings is working!", use_tag=False)
    else:
        pyKeOps_Message("outputs wrong values...", use_tag=False)
keops-main
pykeops/pykeops/numpy/test_install.py
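The expected values [63., 90.] in the test above can be cross-checked with plain NumPy broadcasting; a small sketch that does not depend on the compiled KeOps backend:

import numpy as np

# Genred("SqNorm2(x - y)", ["x = Vi(3)", "y = Vj(3)"]) defaults to a Sum reduction
# over i (axis=0), so the output has one value per y_j.
x = np.arange(1, 10).reshape(-1, 3).astype("float32")   # (3, 3)
y = np.arange(3, 9).reshape(-1, 3).astype("float32")    # (2, 3)

sq_dists = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)   # (3, 2) matrix of |x_i - y_j|^2
print(sq_dists.sum(axis=0))   # -> [63. 90.], matching expected_res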
import numpy as np def from_matrix(ranges_i, ranges_j, keep): r"""Turns a boolean matrix into a KeOps-friendly **ranges** argument. This routine is a helper for the **block-sparse** reduction mode of KeOps, allowing you to turn clustering information (**ranges_i**, **ranges_j**) and a cluster-to-cluster boolean mask (**keep**) into integer tensors of indices that can be used to schedule the KeOps routines. Suppose that you're working with variables :math:`x_i` (:math:`i \in [0,10^6)`), :math:`y_j` (:math:`j \in [0,10^7)`), and that you want to compute a KeOps reduction over indices :math:`i` or :math:`j`: Instead of performing the full kernel dot product (:math:`10^6 \cdot 10^7 = 10^{13}` operations!), you may want to restrict yourself to interactions between points :math:`x_i` and :math:`y_j` that are "close" to each other. With KeOps, the simplest way of doing so is to: 1. Compute cluster labels for the :math:`x_i`'s and :math:`y_j`'s, using e.g. the :func:`grid_cluster` method. 2. Compute the ranges (**ranges_i**, **ranges_j**) and centroids associated to each cluster, using e.g. the :func:`cluster_ranges_centroids` method. 3. Sort the tensors ``x_i`` and ``y_j`` with :func:`sort_clusters` to make sure that the clusters are stored contiguously in memory (this step is **critical** for performance on GPUs). At this point: - the :math:`k`-th cluster of :math:`x_i`'s is given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, for :math:`k \in [0,M)`, - the :math:`\ell`-th cluster of :math:`y_j`'s is given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, for :math:`\ell \in [0,N)`. 4. Compute the :math:`(M,N)` matrix **dist** of pairwise distances between cluster centroids. 5. Apply a threshold on **dist** to generate a boolean matrix ``keep = dist < threshold``. 6. Define a KeOps reduction ``my_genred = Genred(..., axis = 0 or 1)``, as usual. 7. Compute the block-sparse reduction through ``result = my_genred(x_i, y_j, ranges = from_matrix(ranges_i,ranges_j,keep) )`` :func:`from_matrix` is thus the routine that turns a **high-level description** of your block-sparse computation (cluster ranges + boolean matrix) into a set of **integer tensors** (the **ranges** optional argument), used by KeOps to schedule computations on the GPU. Args: ranges_i ((M,2) integer array): List of :math:`[\text{start}_k,\text{end}_k)` indices. For :math:`k \in [0,M)`, the :math:`k`-th cluster of ":math:`i`" variables is given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, etc. ranges_j ((N,2) integer array): List of :math:`[\text{start}_\ell,\text{end}_\ell)` indices. For :math:`\ell \in [0,N)`, the :math:`\ell`-th cluster of ":math:`j`" variables is given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, etc. keep ((M,N) boolean array): If the output ``ranges`` of :func:`from_matrix` is used in a KeOps reduction, we will only compute and reduce the terms associated to pairs of "points" :math:`x_i`, :math:`y_j` in clusters :math:`k` and :math:`\ell` if ``keep[k,l] == 1``. Returns: A 6-uple of integer arrays that can be used as an optional **ranges** argument of :func:`Genred <pykeops.numpy.Genred>`. See the documentation of :func:`Genred <pykeops.numpy.Genred>` for reference. 
Example: >>> r_i = np.array( [ [2,5], [7,12] ], dtype=int ) # 2 clusters: X[0] = x_i[2:5], X[1] = x_i[7:12] >>> r_j = np.array( [ [1,4], [4,9], [20,30] ], dtype=int ) # 3 clusters: Y[0] = y_j[1:4], Y[1] = y_j[4:9], Y[2] = y_j[20:30] >>> x,y = np.array([1., 0.]), np.array([1.5, .5, 2.5]) # dummy "centroids" >>> dist = (x[:,None] - y[None,:])**2 >>> keep = (dist <= 1) # (2,3) matrix >>> print(keep) [[ True True False] [False True False]] --> X[0] interacts with Y[0] and Y[1], X[1] interacts with Y[1] >>> (ranges_i,slices_i,redranges_j, ranges_j,slices_j,redranges_i) = from_matrix(r_i,r_j,keep) --> (ranges_i,slices_i,redranges_j) will be used for reductions with respect to "j" (axis=1) --> (ranges_j,slices_j,redranges_i) will be used for reductions with respect to "i" (axis=0) Information relevant if **axis** = 1: >>> print(ranges_i) # = r_i [[ 2, 5], [ 7, 12]] --> Two "target" clusters in a reduction wrt. j >>> print(slices_i) [2, 3] --> X[0] is associated to redranges_j[0:2] --> X[1] is associated to redranges_j[2:3] >>> print(redranges_j) [[1, 4], [4, 9], [4, 9]] --> For X[0], i in [2,3,4], we'll reduce over j in [1,2,3] and [4,5,6,7,8] --> For X[1], i in [7,8,9,10,11], we'll reduce over j in [4,5,6,7,8] Information relevant if **axis** = 0: >>> print(ranges_j) [[ 1, 4], [ 4, 9], [20, 30]] --> Three "target" clusters in a reduction wrt. i >>> print(slices_j) [1, 3, 3] --> Y[0] is associated to redranges_i[0:1] --> Y[1] is associated to redranges_i[1:3] --> Y[2] is associated to redranges_i[3:3] = no one... >>> print(redranges_i) [[ 2, 5], [ 2, 5], [ 7, 12]] --> For Y[0], j in [1,2,3], we'll reduce over i in [2,3,4] --> For Y[1], j in [4,5,6,7,8], we'll reduce over i in [2,3,4] and [7,8,9,10,11] --> For Y[2], j in [20,21,...,29], there is no reduction to be done """ J, I = np.meshgrid(np.arange(0, keep.shape[1]), np.arange(0, keep.shape[0])) redranges_i = ranges_i[ I.T[keep.T] ] # Use PyTorch indexing to "stack" copies of ranges_i[...] redranges_j = ranges_j[J[keep]] slices_i = np.cumsum( np.sum(keep, axis=1), axis=0 ) # slice indices in the "stacked" array redranges_j slices_j = np.cumsum( np.sum(keep, axis=0), axis=0 ) # slice indices in the "stacked" array redranges_i return ( ranges_i.astype("int32"), slices_i.astype("int32"), redranges_j.astype("int32"), ranges_j.astype("int32"), slices_j.astype("int32"), redranges_i.astype("int32"), )
keops-main
pykeops/pykeops/numpy/cluster/matrix.py
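A short sketch that runs the toy example from the docstring above and unpacks the resulting ranges 6-uple; the import path follows the pykeops.numpy.cluster package shown in this repository:

import numpy as np
from pykeops.numpy.cluster import from_matrix

# Toy clusters and cluster-to-cluster mask, taken from the docstring above:
r_i = np.array([[2, 5], [7, 12]], dtype=int)             # 2 clusters of "i" points
r_j = np.array([[1, 4], [4, 9], [20, 30]], dtype=int)    # 3 clusters of "j" points
x, y = np.array([1.0, 0.0]), np.array([1.5, 0.5, 2.5])   # dummy 1D "centroids"
keep = (x[:, None] - y[None, :]) ** 2 <= 1                # (2, 3) boolean mask

ranges = from_matrix(r_i, r_j, keep)   # 6-uple of int32 arrays
ranges_i, slices_i, redranges_j, ranges_j, slices_j, redranges_i = ranges
# `ranges` can then be passed as the optional ranges=... argument of a Genred call.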
import numpy as np


def grid_cluster(x, size):
    r"""Simplistic clustering algorithm which distributes points into cubic bins.

    Args:
        x ((M,D) array): List of points :math:`x_i \in \mathbb{R}^D`.
        size (float or (D,) array): Dimensions of the cubic cells ("voxels").

    Returns:
        (M,) integer array:
            Vector of integer **labels**. Two points ``x[i]`` and ``x[j]`` are
            in the same cluster if and only if ``labels[i] == labels[j]``.
            Labels are sorted in a compact range :math:`[0,C)`,
            where :math:`C` is the number of non-empty cubic cells.

    Example:
        >>> x = np.array([ [0.], [.1], [.9], [.05], [.5] ])  # points in the unit interval
        >>> labels = grid_cluster(x, .2)  # bins of size .2
        >>> print( labels )
        [0, 0, 2, 0, 1]
    """
    # Quantize the points' positions:
    if x.shape[1] == 1:
        weights = np.array([1], dtype=int)
    elif x.shape[1] == 2:
        weights = np.array([2**10, 1], dtype=int)
    elif x.shape[1] == 3:
        weights = np.array([2**20, 2**10, 1], dtype=int)
    else:
        raise NotImplementedError()
    x_ = np.floor(x / size).astype(int)
    x_ *= weights
    lab = np.sum(x_, axis=1)  # labels
    lab = lab - np.min(lab)

    # Replace arbitrary labels with unique identifiers in a compact arange:
    u_lab = np.sort(np.unique(lab))
    N_lab = len(u_lab)
    foo = np.empty(np.max(u_lab) + 1, dtype=int)
    foo[u_lab] = np.arange(N_lab, dtype=int)
    lab = foo[lab]

    return lab
keops-main
pykeops/pykeops/numpy/cluster/grid_cluster.py
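A small sketch extending the docstring example to 2D points and counting how many points fall in each voxel; the point cloud and cell size are arbitrary:

import numpy as np
from pykeops.numpy.cluster import grid_cluster

x = np.random.rand(1000, 2)              # hypothetical 2D point cloud in the unit square
labels = grid_cluster(x, 0.1)            # (1000,) integer labels in [0, C)

points_per_cell = np.bincount(labels)    # population of each non-empty cell
print(len(points_per_cell), "non-empty cells,",
      points_per_cell.max(), "points in the largest one")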
from .grid_cluster import grid_cluster
from .matrix import from_matrix
from .utils import (
    sort_clusters,
    cluster_ranges,
    cluster_centroids,
    cluster_ranges_centroids,
    swap_axes,
)

# N.B.: the order is important for the autodoc in sphinx!
__all__ = sorted(
    [
        "grid_cluster",
        "from_matrix",
        "sort_clusters",
        "cluster_ranges",
        "cluster_centroids",
        "cluster_ranges_centroids",
        "swap_axes",
    ]
)
keops-main
pykeops/pykeops/numpy/cluster/__init__.py
import numpy as np def sort_clusters(x, lab): r"""Sorts a list of points and labels to make sure that the clusters are contiguous in memory. On the GPU, **contiguous memory accesses** are key to high performances. By making sure that points in the same cluster are stored next to each other in memory, this pre-processing routine allows KeOps to compute block-sparse reductions with maximum efficiency. Args: x ((M,D) array or tuple/list of (M,..) arrays): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) integer arrays): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Returns: (M,D) array or tuple/list of (M,..) arrays, (M,) integer array: Sorted **point cloud(s)** and **vector of labels**. Example: >>> x = np.array([ [0.], [5.], [.4], [.3], [2.] ]) >>> lab = np.array([ 0, 2, 0, 0, 1 ], dtype=int) >>> x_sorted, lab_sorted = sort_clusters(x, lab) >>> print(x_sorted) [[0. ] [0.4] [0.3] [2. ] [5. ]] >>> print(lab_sorted) [0 0 0 1 2] """ perm = np.argsort(lab.ravel()) lab = lab[perm] if type(x) is tuple: x_sorted = tuple(a[perm] for a in x) elif type(x) is list: x_sorted = list(a[perm] for a in x) else: x_sorted = x[perm] return x_sorted, lab def cluster_ranges(lab, Nlab=None): r"""Computes the ``[start,end)`` indices that specify clusters in a sorted point cloud. If **lab** denotes a vector of labels :math:`\ell_i\in[0,C)`, :func:`sort_clusters` allows us to sort our point clouds and make sure that points that share the same label are stored next to each other in memory. :func:`cluster_ranges` is simply there to give you the **slice indices** that correspond to each of those :math:`C` classes. Args: x ((M,D) array): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) integer array): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: Nlab ((C,) integer array, optional): If you have computed it already, you may specify the number of points per class through this integer vector of length :math:`C`. Returns: (C,2) integer array: Stacked array of :math:`[\text{start}_k,\text{end}_k)` indices in :math:`[0,M]`, for :math:`k\in[0,C)`. Example: >>> x = np.array([ [0.], [5.], [.4], [.3], [2.] ]) >>> lab = np.array([ 0, 2, 0, 0, 1 ], dtype=int) >>> x_sorted, lab_sorted = sort_clusters(x, lab) >>> print(x_sorted) [[0. ] [0.4] [0.3] [2. ] [5. ]] >>> print(lab_sorted) [0 0 0 1 2] >>> ranges_i = cluster_ranges(lab) >>> print( ranges_i ) [[0 3] [3 4] [4 5]] --> cluster 0 = x_sorted[0:3, :] --> cluster 1 = x_sorted[3:4, :] --> cluster 2 = x_sorted[4:5, :] """ if Nlab is None: Nlab = np.bincount(lab) pivots = np.concatenate((np.array([0]), np.cumsum(Nlab, axis=0))) return np.stack((pivots[:-1], pivots[1:]), axis=1).astype(int) def cluster_centroids(x, lab, Nlab=None, weights=None, weights_c=None): r"""Computes the (weighted) centroids of classes specified by a vector of labels. If points :math:`x_i \in\mathbb{R}^D` are assigned to :math:`C` different classes by the vector of integer labels :math:`\ell_i \in [0,C)`, this function returns a collection of :math:`C` centroids .. math:: c_k = \frac{\sum_{i, \ell_i = k} w_i\cdot x_i}{\sum_{i, \ell_i=k} w_i}, where the weights :math:`w_i` are set to 1 by default. Args: x ((M,D) array): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) integer array): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: Nlab ((C,) integer array): Number of points per class. Recomputed if None. weights ((N,) array): Positive weights :math:`w_i` of each point. weights_c ((C,) array): Total weight of each class. Recomputed if None. 
Returns: (C,D) array: List of centroids :math:`c_k \in \mathbb{R}^D`. Example: >>> x = np.array([ [0.], [1.], [4.], [5.], [6.] ]) >>> lab = np.array([ 0, 0, 1, 1, 1 ]) >>> weights = np.array([ .5, .5, 2., 1., 1. ]) >>> centroids = cluster_centroids(x, lab, weights=weights) >>> print(centroids) [[0.5 ] [4.75]] """ if Nlab is None: Nlab = np.bincount(lab).astype(float) if weights is not None and weights_c is None: weights_c = np.bincount(lab, weights=weights)[:, None] c = np.zeros((len(Nlab), x.shape[1]), dtype=x.dtype) for d in range(x.shape[1]): if weights is None: c[:, d] = np.bincount(lab, weights=x[:, d]) / Nlab else: c[:, d] = ( np.bincount(lab, weights=x[:, d] * weights.ravel()) / weights_c.ravel() ) return c def cluster_ranges_centroids(x, lab, weights=None): r"""Computes the cluster indices and centroids of a (weighted) point cloud with labels. If **x** and **lab** encode a cloud of points :math:`x_i\in\mathbb{R}^D` with labels :math:`\ell_i\in[0,C)`, for :math:`i\in[0,M)`, this routine returns: - Ranges :math:`[\text{start}_k,\text{end}_k)` compatible with :func:`sort_clusters` for :math:`k\in[0,C)`. - Centroids :math:`c_k` for each cluster :math:`k`, computed as barycenters using the weights :math:`w_i \in \mathbb{R}_{>0}`: .. math:: c_k = \frac{\sum_{i, \ell_i=k} w_i\cdot \ell_i}{\sum_{i, \ell_i=k} w_i} - Total weights :math:`\sum_{i, \ell_i=k} w_i`, for :math:`k\in[0,C)`. The weights :math:`w_i` can be given through a vector **weights** of size :math:`M`, and are set by default to 1 for all points in the cloud. Args: x ((M,D) array): List of points :math:`x_i \in \mathbb{R}^D`. lab ((M,) integer array): Vector of class labels :math:`\ell_i\in\mathbb{N}`. Keyword Args: weights ((M,) array): Positive weights :math:`w_i` that can be used to compute our barycenters. Returns: (C,2) integer array, (C,D) array, (C,) array: **ranges** - Stacked array of :math:`[\text{start}_k,\text{end}_k)` indices in :math:`[0,M]`, for :math:`k\in[0,C)`, compatible with the :func:`sort_clusters` routine. **centroids** - List of centroids :math:`c_k \in \mathbb{R}^D`. **weights_c** - Total weight of each cluster. Example: >>> x = np.array([[0.], [.5], [1.], [2.], [3.] ]) >>> lab = np.array([ 0, 0, 1, 1, 1 ], dtype=int) >>> ranges, centroids, weights_c = cluster_ranges_centroids(x, lab) >>> print(ranges) [[0 2] [2 5]] --> cluster 0 = x[0:2, :] --> cluster 1 = x[2:5, :] >>> print(centroids) [[0.25] [2. ]] >>> print(weights_c) [2. 3.] >>> weights = np.array([ 1., .5, 1., 1., 10. ]) >>> ranges, centroids, weights_c = cluster_ranges_centroids(x, lab, weights=weights) >>> print(ranges) [[0 2] [2 5]] --> cluster 0 = x[0:2, :] --> cluster 1 = x[2:5, :] >>> print(centroids) [[0.16666667] [2.75 ]] >>> print(weights_c) [ 1.5 12. ] """ Nlab = np.bincount(lab).astype(float) if weights is not None: w_c = np.bincount(lab, weights=weights).ravel() return ( cluster_ranges(lab, Nlab), cluster_centroids(x, lab, Nlab, weights=weights, weights_c=w_c), w_c, ) else: return cluster_ranges(lab, Nlab), cluster_centroids(x, lab, Nlab), Nlab def swap_axes(ranges): r"""Swaps the ":math:`i`" and ":math:`j`" axes of a reduction's optional **ranges** parameter. This function returns **None** if **ranges** is **None**, and swaps the :math:`i` and :math:`j` arrays of indices otherwise.""" if ranges is None: return None else: return (*ranges[3:6], *ranges[0:3])
keops-main
pykeops/pykeops/numpy/cluster/utils.py
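A sketch chaining the helpers above in the order used for block-sparse reductions: label the points, sort them so that clusters are contiguous, then extract ranges and centroids (the point cloud and voxel size are arbitrary):

import numpy as np
from pykeops.numpy.cluster import (
    grid_cluster,
    sort_clusters,
    cluster_ranges_centroids,
)

x = np.random.rand(500, 3)                     # hypothetical 3D point cloud
lab = grid_cluster(x, 0.2)                     # voxel labels
x_sorted, lab_sorted = sort_clusters(x, lab)   # make clusters contiguous in memory
ranges, centroids, weights_c = cluster_ranges_centroids(x_sorted, lab_sorted)

# ranges[k] = [start_k, end_k) indices of cluster k in x_sorted,
# centroids[k] = barycenter of cluster k,
# weights_c[k] = total weight (here: number of points) of cluster k.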
keops-main
pykeops/pykeops/numpy/lazytensor/__init__.py
import numpy as np

from pykeops.common.lazy_tensor import GenericLazyTensor, ComplexGenericLazyTensor
from pykeops.numpy.utils import numpytools


# Convenient aliases:
def Var(x_or_ind, dim=None, cat=None):
    if dim is None:
        # init via data: we assume x_or_ind is data
        return LazyTensor(x_or_ind, axis=cat)
    else:
        # init via symbolic variable given as triplet (ind,dim,cat)
        return LazyTensor((x_or_ind, dim, cat))


def Vi(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 0.
    """
    return Var(x_or_ind, dim, 0)


def Vj(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 1.
    """
    return Var(x_or_ind, dim, 1)


def Pm(x_or_ind, dim=None):
    r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 2.
    """
    return Var(x_or_ind, dim, 2)


class LazyTensor(GenericLazyTensor):
    r"""Symbolic wrapper for NumPy arrays.

    :class:`LazyTensor` encode numerical arrays through the combination
    of a symbolic, **mathematical formula** and a list of **small data arrays**.
    They can be used to implement efficient algorithms on objects
    that are **easy to define**, but **impossible to store** in memory
    (e.g. the matrix of pairwise distances between two large point clouds).

    :class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors,
    combined using simple mathematical operations and converted
    back to NumPy arrays or PyTorch tensors with
    efficient reduction routines, which outperform
    standard tensorized implementations by two orders of magnitude.
    """

    def __new__(self, x=None, axis=None, is_complex=False):
        if is_complex or numpytools.detect_complex(x):
            return ComplexLazyTensor(x, axis)
        else:
            return object.__new__(self)

    def __init__(self, x=None, axis=None, is_complex=False):
        super().__init__(x=x, axis=axis)

    def get_tools(self):
        self.tools = numpytools
        self.Genred = numpytools.Genred
        self.KernelSolve = numpytools.KernelSolve

    def lt_constructor(self, x=None, axis=None, is_complex=False):
        return LazyTensor(x=x, axis=axis, is_complex=is_complex)


class ComplexLazyTensor(ComplexGenericLazyTensor):
    r"""Extension of the LazyTensor class for complex operations."""

    def __init__(self, x=None, axis=None):
        super().__init__(x=x, axis=axis)

    def get_tools(self):
        self.tools = numpytools
        self.Genred = numpytools.Genred
        self.KernelSolve = numpytools.KernelSolve

    def lt_constructor(self, x=None, axis=None, is_complex=True):
        return LazyTensor(x=x, axis=axis, is_complex=is_complex)
keops-main
pykeops/pykeops/numpy/lazytensor/LazyTensor.py
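A minimal sketch of the intended workflow for the NumPy LazyTensor: wrap two small point clouds, build a symbolic Gaussian kernel, and reduce it back to a dense NumPy array. Array sizes are arbitrary, and the final reduction requires a working KeOps backend:

import numpy as np
from pykeops.numpy import LazyTensor

x = np.random.rand(1000, 3)          # M points in R^3
y = np.random.rand(2000, 3)          # N points in R^3

x_i = LazyTensor(x[:, None, :])      # (M, 1, 3) -> "Vi" variable
y_j = LazyTensor(y[None, :, :])      # (1, N, 3) -> "Vj" variable

D_ij = ((x_i - y_j) ** 2).sum(-1)    # symbolic (M, N) matrix of squared distances
K_ij = (-D_ij).exp()                 # symbolic Gaussian kernel
a = K_ij.sum(axis=1)                 # actual (M, 1) NumPy array: a_i = sum_j exp(-|x_i - y_j|^2)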
import numpy as np from pykeops.common.get_options import get_tag_backend from pykeops.common.operations import preprocess, postprocess from pykeops.common.parse_type import get_sizes, complete_aliases, get_optional_flags from pykeops.common.utils import axis2cat from pykeops import default_device_id from pykeops.common.utils import pyKeOps_Warning class Genred: r""" Creates a new generic operation. This is KeOps' main function, whose usage is documented in the :doc:`user-guide <../../Genred>`, the :doc:`gallery of examples <../../../_auto_examples/index>` and the :doc:`high-level tutorials <../../../_auto_tutorials/index>`. Taking as input a handful of strings and integers that specify a custom Map-Reduce operation, it returns a C++ wrapper that can be called just like any other NumPy function. Note: On top of the **Sum** and **LogSumExp** reductions, KeOps supports :ref:`variants of the ArgKMin reduction <part.reduction>` that can be used to implement k-nearest neighbor search. These routines return indices encoded as **floating point numbers**, and produce no gradient. Fortunately though, you can simply turn them into ``LongTensors`` and use them to index your arrays, as showcased in the documentation of :func:`generic_argmin() <pykeops.numpy.generic_argmin>`, :func:`generic_argkmin() <pykeops.numpy.generic_argkmin>` and in the :doc:`K-means tutorial <../../../_auto_tutorials/kmeans/plot_kmeans_numpy>`. Example: >>> my_conv = Genred('Exp(-SqNorm2(x - y))', # formula ... ['x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)'], # 2nd input: dim-3 vector per column ... reduction_op='Sum', # we also support LogSumExp, Min, etc. ... axis=1) # reduce along the lines of the kernel matrix >>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines >>> x = np.random.randn(1000000, 3) >>> y = np.random.randn(2000000, 3) >>> a = my_conv(x, y) # a_i = sum_j exp(-|x_i-y_j|^2) >>> print(a.shape) [1000000, 1] """ def __init__( self, formula, aliases, reduction_op="Sum", axis=0, dtype=None, opt_arg=None, formula2=None, cuda_type=None, dtype_acc="auto", use_double_acc=False, sum_scheme="auto", enable_chunks=True, rec_multVar_highdim=False, ): r""" Instantiate a new generic operation. Note: :class:`Genred` relies on C++ or CUDA kernels that are compiled on-the-fly, and stored in a :ref:`cache directory <part.cache>` as shared libraries (".so" files) for later use. Args: formula (string): The scalar- or vector-valued expression that should be computed and reduced. The correct syntax is described in the :doc:`documentation <../../Genred>`, using appropriate :doc:`mathematical operations <../../../api/math-operations>`. aliases (list of strings): A list of identifiers of the form ``"AL = TYPE(DIM)"`` that specify the categories and dimensions of the input variables. Here: - ``AL`` is an alphanumerical alias, used in the **formula**. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0. - ``Vj``: indexation by :math:`j` along axis 1. - ``Pm``: no indexation, the input tensor is a *vector* and not a 2d array. - ``DIM`` is an integer, the dimension of the current variable. As described below, :meth:`__call__` will expect as input Tensors whose shape are compatible with **aliases**. Keyword Args: reduction_op (string, default = ``"Sum"``): Specifies the reduction operation that is applied to reduce the values of ``formula(x_i, y_j, ...)`` along axis 0 or axis 1. 
The supported values are one of :ref:`part.reduction` axis (int, default = 0): Specifies the dimension of the "kernel matrix" that is reduced by our routine. The supported values are: - **axis** = 0: reduction with respect to :math:`i`, outputs a ``Vj`` or ":math:`j`" variable. - **axis** = 1: reduction with respect to :math:`j`, outputs a ``Vi`` or ":math:`i`" variable. opt_arg (int, default = None): If **reduction_op** is in ``["KMin", "ArgKMin", "KMinArgKMin"]``, this argument allows you to specify the number ``K`` of neighbors to consider. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64".. use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. This option may be changed only when reduction_op is one of: "Sum", "MaxSumShiftExp", "LogSumExp", "Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight". Default value "auto" will set this option to "block_red" for these reductions. Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. rec_multVar_highdim (bool, default False): for Gpu mode only, enable special "final chunked" computation mode for accelerating reductions with formulas involving large dimension variables. Beware ! This will only work if the formula has the very special form that allows such computation mode. """ if dtype: pyKeOps_Warning( "keyword argument dtype in Genred is deprecated ; argument is ignored." ) if cuda_type: pyKeOps_Warning( "keyword argument cuda_type in Genred is deprecated ; argument is ignored." 
) self.reduction_op = reduction_op reduction_op_internal, formula2 = preprocess(reduction_op, formula2) self.optional_flags = get_optional_flags( reduction_op_internal, dtype_acc, use_double_acc, sum_scheme, enable_chunks, ) if rec_multVar_highdim: self.optional_flags["multVar_highdim"] = 1 else: self.optional_flags["multVar_highdim"] = 0 str_opt_arg = "," + str(opt_arg) if opt_arg else "" str_formula2 = "," + formula2 if formula2 else "" self.formula = ( reduction_op_internal + "_Reduction(" + formula + str_opt_arg + "," + str(axis2cat(axis)) + str_formula2 + ")" ) self.aliases = complete_aliases(self.formula, aliases) self.axis = axis self.opt_arg = opt_arg def __call__(self, *args, backend="auto", device_id=-1, ranges=None, out=None): r""" Apply the routine on arbitrary NumPy arrays. .. warning:: Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}` for :math:`i\in[0,M)`), KeOps expects inputs to be formatted as 2d Tensors of size ``(M,dim)``. In practice, ``a.view(-1,1)`` should be used to turn a vector of weights into a *list of scalar values*. Args: *args (2d arrays (variables ``Vi(..)``, ``Vj(..)``) and 1d arrays (parameters ``Pm(..)``)): The input numerical arrays, which should all have the same ``dtype``, be **contiguous** and be stored on the **same device**. KeOps expects one array per alias, with the following compatibility rules: - All ``Vi(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`M`. - All ``Vj(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`N`. - All ``Pm(Dim_k)`` variables are encoded as **1d-arrays** (vectors) of size ``Dim_k``. Keyword Args: backend (string): Specifies the map-reduce scheme. The supported values are: - ``"auto"`` (default): let KeOps decide which backend is best suited to your data, based on the tensors' shapes. ``"GPU_1D"`` will be chosen in most cases. - ``"CPU"``: use a simple C++ ``for`` loop on a single CPU core. - ``"GPU_1D"``: use a `simple multithreading scheme <https://github.com/getkeops/keops/blob/main/keops/core/GpuConv1D.cu>`_ on the GPU - basically, one thread per value of the output index. - ``"GPU_2D"``: use a more sophisticated `2D parallelization scheme <https://github.com/getkeops/keops/blob/main/keops/core/GpuConv2D.cu>`_ on the GPU. - ``"GPU"``: let KeOps decide which one of the ``"GPU_1D"`` or the ``"GPU_2D"`` scheme will run faster on the given input. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of integer arrays, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` with *Mc clusters along axis 0* and *Nc clusters along axis 1*. If None (default), we simply loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. **The first three ranges** will be used if **axis** = 1 (reduction along the axis of ":math:`j` variables"), and to compute gradients with respect to ``Vi(..)`` variables: - ``ranges_i``, (Mc,2) integer array - slice indices :math:`[\operatorname{start}^I_k,\operatorname{end}^I_k)` in :math:`[0,M]` that specify our Mc blocks along the axis 0 of ":math:`i` variables". 
- ``slices_i``, (Mc,) integer array - consecutive slice indices :math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{M_c}]` that specify Mc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_j``, with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`. **The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that ``slices_i[-1] == len(redrange_j)``. - ``redranges_j``, (Mcc,2) integer array - slice indices :math:`[\operatorname{start}^J_\ell,\operatorname{end}^J_\ell)` in :math:`[0,N]` that specify reduction ranges along the axis 1 of ":math:`j` variables". If **axis** = 1, these integer arrays allow us to say that ``for k in range(Mc)``, the output values for indices ``i in range( ranges_i[k,0], ranges_i[k,1] )`` should be computed using a Map-Reduce scheme over indices ``j in Union( range( redranges_j[l, 0], redranges_j[l, 1] ))`` for ``l in range( slices_i[k-1], slices_i[k] )``. **Likewise, the last three ranges** will be used if **axis** = 0 (reduction along the axis of ":math:`i` variables"), and to compute gradients with respect to ``Vj(..)`` variables: - ``ranges_j``, (Nc,2) integer array - slice indices :math:`[\operatorname{start}^J_k,\operatorname{end}^J_k)` in :math:`[0,N]` that specify our Nc blocks along the axis 1 of ":math:`j` variables". - ``slices_j``, (Nc,) integer array - consecutive slice indices :math:`[\operatorname{end}^S_1, ..., \operatorname{end}^S_{N_c}]` that specify Nc ranges :math:`[\operatorname{start}^S_k,\operatorname{end}^S_k)` in ``redranges_i``, with :math:`\operatorname{start}^S_k = \operatorname{end}^S_{k-1}`. **The first 0 is implicit**, meaning that :math:`\operatorname{start}^S_0 = 0`, and we typically expect that ``slices_j[-1] == len(redrange_i)``. - ``redranges_i``, (Ncc,2) integer array - slice indices :math:`[\operatorname{start}^I_\ell,\operatorname{end}^I_\ell)` in :math:`[0,M]` that specify reduction ranges along the axis 0 of ":math:`i` variables". If **axis** = 0, these integer arrays allow us to say that ``for k in range(Nc)``, the output values for indices ``j in range( ranges_j[k,0], ranges_j[k,1] )`` should be computed using a Map-Reduce scheme over indices ``i in Union( range( redranges_i[l, 0], redranges_i[l, 1] ))`` for ``l in range( slices_j[k-1], slices_j[k] )``. out (2d array, None by default): The output numerical array, for in-place computation. If provided, the output array should all have the same ``dtype``, be **contiguous** and be stored on the **same device** as the arguments. Moreover it should have the correct shape for the output. Returns: (M,D) or (N,D) array: The output of the reduction, a **2d-tensor** with :math:`M` or :math:`N` lines (if **axis** = 1 or **axis** = 0, respectively) and a number of columns that is inferred from the **formula**. """ # Get tags tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args) # number of batch dimensions # N.B. we assume here that there is at least a cat=0 or cat=1 variable in the formula... 
nbatchdims = max(len(arg.shape) for arg in args) - 2 use_ranges = nbatchdims > 0 or ranges dtype = args[0].dtype.__str__() if device_id == -1: device_id = default_device_id if tagCPUGPU == 1 else -1 from pykeops.common.keops_io import keops_binder self.myconv = keops_binder["nvrtc" if tagCPUGPU else "cpp"]( tagCPUGPU, tag1D2D, tagHostDevice, use_ranges, device_id, self.formula, self.aliases, len(args), dtype, "numpy", self.optional_flags, ).import_module() # N.B.: KeOps C++ expects contiguous data arrays test_contig = all(arg.flags["C_CONTIGUOUS"] for arg in args) if not test_contig: pyKeOps_Warning( "at least one of the input tensors is not contiguous. " + "Consider using contiguous data arrays to avoid unnecessary copies." ) args = tuple(np.ascontiguousarray(arg) for arg in args) # N.B.: KeOps C++ expects contiguous integer arrays as ranges if ranges: ranges = tuple(np.ascontiguousarray(r) for r in ranges) nx, ny = get_sizes(self.aliases, *args) nout, nred = (nx, ny) if self.axis == 1 else (ny, nx) if "Arg" in self.reduction_op: # when using Arg type reductions, # if nred is greater than 16 millions and dtype=float32, the result is not reliable # because we encode indices as floats, so we raise an exception ; # same with float16 type and nred>2048 if nred > 1.6e7 and dtype in ("float32", "float"): raise ValueError( "size of input array is too large for Arg type reduction with single precision. Use double precision." ) elif nred > 2048 and dtype in ("float16", "half"): raise ValueError( "size of input array is too large for Arg type reduction with float16 dtype.." ) out = self.myconv.genred_numpy(-1, ranges, nx, ny, nbatchdims, out, *args) return postprocess(out, "numpy", self.reduction_op, nout, self.opt_arg, dtype)
keops-main
pykeops/pykeops/numpy/generic/generic_red.py
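A sketch of a Genred call mirroring the Gaussian convolution used in WarmUpGpu() above, here with small, arbitrary sizes and a vector-valued weight; like every Genred call, it needs a working KeOps backend:

import numpy as np
from pykeops.numpy import Genred

formula = "Exp(-oos2 * SqDist(x, y)) * b"
aliases = [
    "x = Vi(3)",      # i-variable, 3D points
    "y = Vj(3)",      # j-variable, 3D points
    "b = Vj(2)",      # j-variable, 2D weights
    "oos2 = Pm(1)",   # scalar parameter 1/sigma^2
]
my_conv = Genred(formula, aliases, reduction_op="Sum", axis=1)

x = np.random.rand(100, 3)
y = np.random.rand(200, 3)
b = np.random.rand(200, 2)
oos2 = np.array([1.0])

a = my_conv(x, y, b, oos2)   # (100, 2) array: a_i = sum_j exp(-oos2 * |x_i - y_j|^2) * b_j
print(a.shape)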
keops-main
pykeops/pykeops/numpy/generic/__init__.py
from pykeops.common.parse_type import get_type from pykeops.common.utils import cat2axis from pykeops.numpy import Genred def generic_sum(formula, output, *aliases, **kwargs): r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with a "Sum" reduction. Args: formula (string): Symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. output (string): An identifier of the form ``"AL = TYPE(DIM)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. - ``DIM`` is an integer, the dimension of the output variable; it should be compatible with **formula**. *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. Keyword Args: dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. The supported values are: - **dtype** = ``"float16"``, - **dtype** = ``"float32"``, - **dtype** = ``"float64"``. Returns: A generic reduction that can be called on arbitrary NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`. Example: >>> my_conv = generic_sum( # Custom Kernel Density Estimator ... 'Exp(-SqNorm2(x - y))', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)') # 2nd input: dim-3 vector per line >>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines >>> x = np.random.randn(1000000, 3) >>> y = np.random.randn(2000000, 3) >>> a = my_conv(x, y) # a_i = sum_j exp(-|x_i-y_j|^2) >>> print(a.shape) (1000000, 1) """ _, cat, _, _ = get_type(output) return Genred( formula, list(aliases), reduction_op="Sum", axis=cat2axis(cat), **kwargs ) def generic_logsumexp(formula, output, *aliases, **kwargs): r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with a "LogSumExp" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. output (string): An identifier of the form ``"AL = TYPE(1)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. Keyword Args: dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. The supported values are: - **dtype** = ``"float16"``, - **dtype** = ``"float32"``, - **dtype** = ``"float64"``. Returns: A generic reduction that can be called on arbitrary NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`. Example: Log-likelihood of a Gaussian Mixture Model, .. math:: a_i = f(x_i) &= \log \sum_{j=1}^{N} \exp(-\gamma\cdot\|x_i-y_j\|^2)\cdot b_j \\ &= \log \sum_{j=1}^{N} \exp\big(-\gamma\cdot\|x_i-y_j\|^2 \,+\, \log(b_j) \big). >>> log_likelihood = generic_logsumexp( ... '(-(g * SqNorm2(x - y))) + b', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(3)', # 1st input: dim-3 vector per line ... 'y = Vj(3)', # 2nd input: dim-3 vector per line ... 
'g = Pm(1)', # 3rd input: vector of size 1 ... 'b = Vj(1)') # 4th input: 1 scalar per line >>> x = np.random.randn(1000000, 3) >>> y = np.random.randn(2000000, 3) >>> g = np.array([.5]) # Parameter of our GMM >>> b = np.random.rand(2000000, 1) # Positive weights... >>> b = b / b.sum() # Normalized to get a probability measure >>> a = log_likelihood(x, y, g, np.log(b)) # a_i = log sum_j exp(-g*|x_i-y_j|^2) * b_j >>> print(a.shape) (1000000, 1) """ _, cat, _, _ = get_type(output) return Genred( formula, list(aliases), reduction_op="LogSumExp", axis=cat2axis(cat), **kwargs ) def generic_argkmin(formula, output, *aliases, **kwargs): r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with an "ArgKMin" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. output (string): An identifier of the form ``"AL = TYPE(K)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. - ``K`` is an integer, the number of values to extract. *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. Keyword Args: dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. The supported values are: - **dtype** = ``"float16"``, - **dtype** = ``"float32"``, - **dtype** = ``"float64"``. Returns: A generic reduction that can be called on arbitrary NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`. Example: Bruteforce K-nearest neighbors search in dimension 100: >>> knn = generic_argkmin( ... 'SqDist(x, y)', # Formula ... 'a = Vi(3)', # Output: 3 scalars per line ... 'x = Vi(100)', # 1st input: dim-100 vector per line ... 'y = Vj(100)') # 2nd input: dim-100 vector per line >>> x = np.random.randn(5, 100) >>> y = np.random.randn(20000, 100) >>> a = knn(x, y) >>> print(a) [[ 9054., 11653., 11614.], [13466., 11903., 14180.], [14164., 8809., 3799.], [ 2092., 3323., 18479.], [14433., 11315., 11841.]] >>> print( np.linalg.norm(x - y[ a[:,0].astype(int) ], axis=1) ) # Distance to the nearest neighbor [10.7933, 10.3235, 10.1218, 11.4919, 10.5100] >>> print( np.linalg.norm(x - y[ a[:,1].astype(int) ], axis=1) ) # Distance to the second neighbor [11.3702, 10.6550, 10.7646, 11.5676, 11.1356] >>> print( np.linalg.norm(x - y[ a[:,2].astype(int) ], axis=1) ) # Distance to the third neighbor [11.3820, 10.6725, 10.8510, 11.6071, 11.1968] """ _, cat, k, _ = get_type(output) return Genred( formula, list(aliases), reduction_op="ArgKMin", axis=cat2axis(cat), opt_arg=k, **kwargs ) def generic_argmin(formula, output, *aliases, **kwargs): r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with an "ArgMin" reduction. Args: formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. output (string): An identifier of the form ``"AL = TYPE(1)"`` that specifies the category and dimension of the output variable. Here: - ``AL`` is a dummy alphanumerical name. - ``TYPE`` is a *category*. One of: - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1. - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0. 
*aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`. Keyword Args: dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. The supported values are: - **dtype** = ``"float16"``, - **dtype** = ``"float32"``, - **dtype** = ``"float64"``. Returns: A generic reduction that can be called on arbitrary NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`. Example: Bruteforce nearest neighbor search in dimension 100: >>> nearest_neighbor = generic_argmin( ... 'SqDist(x, y)', # Formula ... 'a = Vi(1)', # Output: 1 scalar per line ... 'x = Vi(100)', # 1st input: dim-100 vector per line ... 'y = Vj(100)') # 2nd input: dim-100 vector per line >>> x = np.random.randn(5, 100) >>> y = np.random.randn(20000, 100) >>> a = nearest_neighbor(x, y) >>> print(a) [[ 8761.], [ 2836.], [ 906.], [16130.], [ 3158.]] >>> dists = np.linalg.norm(x - y[ a.view(-1).long() ], axis=1) # Distance to the nearest neighbor >>> print(dists) [10.5926, 10.9132, 9.9694, 10.1396, 10.1955] """ _, cat, _, _ = get_type(output) return Genred( formula, list(aliases), reduction_op="ArgMin", axis=cat2axis(cat), **kwargs )
keops-main
pykeops/pykeops/numpy/generic/generic_ops.py
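A sketch of the ArgKMin-based nearest-neighbor pattern described above, with the float-encoded indices cast back to integers on the NumPy side before indexing; it assumes generic_argkmin is importable from pykeops.numpy, as suggested by the docstrings above, and the data sizes are arbitrary:

import numpy as np
from pykeops.numpy import generic_argkmin

knn = generic_argkmin(
    "SqDist(x, y)",   # formula
    "a = Vi(3)",      # output: 3 indices per query point
    "x = Vi(10)",     # queries: dim-10 vectors
    "y = Vj(10)",     # database: dim-10 vectors
)

x = np.random.randn(50, 10)
y = np.random.randn(5000, 10)

nn = knn(x, y).astype(int)                       # ArgKMin returns indices encoded as floats
d0 = np.linalg.norm(x - y[nn[:, 0]], axis=1)     # distance to the nearest neighbor
d1 = np.linalg.norm(x - y[nn[:, 1]], axis=1)     # distance to the second neighbor
print(d0.mean(), d1.mean())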
import copy import re import math import numpy as np from pykeops.common.utils import check_broadcasting def same_or_one_test(*dims): # test wether input dimensions are compatible with broadcasting return len(set(list(dims) + [1])) <= 2 def is_scalar_and_equals(x, val): # test wether the input x is a Python scalar and # that its value equals val if isinstance(x, (int, float, complex)) and not isinstance(x, bool): return x == val else: return False def is_complex_lazytensor(x): return isinstance(x, ComplexGenericLazyTensor) class GenericLazyTensor: r"""Symbolic wrapper for NumPy arrays and PyTorch tensors. This is the abstract class, end user should use :class:`pykeops.numpy.LazyTensor` or :class:`pykeops.torch.LazyTensor`. :class:`LazyTensor` encode numerical arrays through the combination of a symbolic, **mathematical formula** and a list of **small data arrays**. They can be used to implement efficient algorithms on objects that are **easy to define**, but **impossible to store** in memory (e.g. the matrix of pairwise distances between two large point clouds). :class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors, combined using simple mathematical operations and converted back to NumPy arrays or PyTorch tensors with efficient reduction routines, which outperform standard tensorized implementations by two orders of magnitude. """ variables = () symbolic_variables = () formula = None formula2 = None ndim = None tools = None Genred = None KernelSolve = None batchdims = None ni = None nj = None axis = None ranges = None # Block-sparsity pattern backend = None # "CPU", "GPU", "GPU_2D", etc. _dtype = None is_complex = False def __init__(self, x=None, axis=None): r"""Creates a KeOps symbolic variable. Args: x: May be either: - A *float*, a *list of floats*, a *NumPy float*, a *0D or 1D NumPy array*, a *0D or 1D PyTorch tensor*, in which case the :class:`LazyTensor` represents a constant **vector of parameters**, to be broadcasted on other :class:`LazyTensor`. - A *2D NumPy array* or *PyTorch tensor*, in which case the :class:`LazyTensor` represents a **variable** indexed by :math:`i` if **axis=0** or :math:`j` if **axis=1**. - A *3D+ NumPy array* or *PyTorch tensor* with a dummy dimension (=1) at position -3 or -2, in which case the :class:`LazyTensor` represents a **variable** indexed by :math:`i` or :math:`j`, respectively. Dimensions before the last three will be handled as **batch dimensions**, that may support operator broadcasting. - A *tuple of 3 integers* (ind,dim,cat), in which case the :class:`LazyTensor` represents a :doc:`symbolic variable <../../../api/math-operations>` that should be instantiated at call-time. - An *integer*, in which case the :class:`LazyTensor` represents an **integer constant** handled efficiently at compilation time. - **None**, for internal use. axis (int): should be equal to 0 or 1 if **x** is a 2D tensor, and **None** otherwise. .. warning:: A :class:`LazyTensor` constructed from a NumPy array or a PyTorch tensor retains its **dtype** (float16, float32 or float64) and **device** properties (is it stored on the GPU?). Since KeOps does **not** support automatic type conversions and data transfers, please make sure **not to mix** :class:`LazyTensor` that come from different frameworks/devices or which are stored with different precisions. 
""" # Duck typing attribute, to be used instead of "isinstance(self, GenericLazyTensor)" # This is a workaround for an importlib.reload messy situation, # and will come handy when we'll start supporting Block GenericLazyTensors # and other custom operators. self.__GenericLazyTensor__ = True self.get_tools() # A KeOps LazyTensor can be built from many different objects: if x is not None: # Stage 1: Are we dealing with simple numbers? --------------------- typex = type(x) if typex == tuple: # x is not a Tensor but a triplet of integers # (ind,dim,cat) that specifies an abstract variable if ( len(x) != 3 or not isinstance(x[0], int) or not isinstance(x[1], int) or not isinstance(x[2], int) ): raise ValueError( "LazyTensors(tuple) is only valid if tuple = (ind,dim,cat) is a triplet of integers." ) if axis is not None: raise ValueError( "'axis' parameter should not be given when 'x' is of the form (ind,dim,cat)." ) self.symbolic_variables = (x,) self.ndim = x[1] self.axis = x[2] self.formula = "VarSymb({},{},{})".format(x[0], self.ndim, self.axis) return # That's it! # Integer constants are best handled directly by the compiler elif typex == int: self.formula = "IntCst(" + str(x) + ")" self.ndim = 1 self.axis = 2 return # That's it! # Float numbers must be encoded as Parameters, as C++'s templating system cannot deal # with floating point arithmetics. elif typex in self.tools.float_types: x = [x] # Convert to list and go to stage 2 typex = list # Stage 2: Dealing with python lists, understood as arrays of floats, # and handled as Parameter variables without fixed dtype if typex == list: if axis is not None and axis != 2: raise ValueError( "Lists of numbers are handled as Parameter " "variables, with an optional 'axis' argument that is equal to 2." ) self.variables = (x,) self.ndim = len(x) self.axis = 2 self.formula = "Var({},{},2)".format(id(x), self.ndim) return # That's it! else: self._dtype = self.tools.dtypename(self.tools.dtype(x)) typex = type(x) if ( typex not in [type(None), tuple, int, float, list, self.tools.arraytype] + self.tools.float_types ): raise TypeError( "LazyTensors should be built from " + self.tools.arrayname + ", " "float/integer numbers, lists of floats or 3-uples of integers. " "Received: {}".format(typex) ) if typex == self.tools.arraytype and len(x.shape) == 0: x = x.view(1) elif typex in self.tools.float_types: x = self.tools.arraytype([x]).view(1) if typex == self.tools.arraytype: if len(x.shape) >= 3: # Infer axis from the input shape # If x is a 3D+ array, its shape must be either (..,M,1,D) or (..,1,N,D) or (..,1,1,D). # We infer axis from shape and squeeze out the "1" dimensions: if axis is not None: raise ValueError( "'axis' parameter should not be given when 'x' is a 3D tensor." ) if len(x.shape) > 3: # We're in "batch mode" self.batchdims = tuple(x.shape[:-3]) if x.shape[-3] == 1: if x.shape[-2] == 1: # (..,1,1,D) -> Pm(D) x = x.squeeze(-2).squeeze(-2) axis = 2 else: # (..,1,N,D) -> Vj(D) x = x.squeeze(-3) axis = 1 elif x.shape[-2] == 1: # (M,1,D) -> Vi(D) x = x.squeeze(-2) axis = 0 else: raise ValueError( "If 'x' is a 3D+ tensor, its shape should be one of (..,M,1,D), (..,1,N,D) or (..,1,1,D)." ) # Stage 4: x is now encoded as a 2D or 1D array + batch dimensions -------------------- if ( len(x.shape) >= 2 and axis != 2 ): # shape is (..,M,D) or (..,N,D), with an explicit 'axis' parameter if axis is None or axis not in (0, 1): raise ValueError( "When 'x' is encoded as a 2D array, LazyTensor expects an explicit 'axis' value in {0,1}." 
) # id(x) is used as temporary identifier for KeOps "Var", # this identifier will be changed when calling method "fixvariables" # But first we do a small hack, in order to distinguish same array involved twice in a formula but with # different axis (e.g. Vi(x)-Vj(x) formula): we do a dummy reshape in order to get a different id if axis == 1: x = self.tools.view(x, x.shape) self.variables = (x,) self.ndim = x.shape[-1] self.axis = axis self.formula = "Var({},{},{})".format(id(x), self.ndim, self.axis) if axis == 0: self.ni = x.shape[-2] else: self.nj = x.shape[-2] self._dtype = self.tools.dtypename(self.tools.dtype(x)) elif ( len(x.shape) == 1 or axis == 2 ): # shape is (D,): x is a "Pm(D)" parameter if axis is not None and axis != 2: raise ValueError( "When 'x' is encoded as a 1D or 0D array, 'axis' must be None or 2 (= Parameter variable)." ) self.variables = (x,) self.ndim = x.shape[-1] self.axis = 2 self.formula = "Var({},{},2)".format(id(x), self.ndim) else: raise ValueError( "LazyTensors can be built from 0D, 1D, 2D or 3D+ tensors. " + "Received x of shape: {}.".format(x.shape) ) def lt_constructor(self, x=None, axis=None): r"""This method is specialized in :class:`pykeops.numpy.LazyTensor` and :class:`pykeops.torch.LazyTensor`. It returns a new instance of a LazyTensor (numpy or pytorch).""" pass def get_tools(self): r"""This method is specialized in :class:`pykeops.numpy.LazyTensor` and :class:`pykeops.torch.LazyTensor`. It populates the tools class.""" pass def fixvariables(self): r"""If needed, assigns final labels to each variable and pads their batch dimensions prior to a :mod:`Genred()` call.""" newvars = () if self.formula2 is None: self.formula2 = "" # We don't want to get regexp errors... device = None # Useful to load lists (and float constants) on the proper device for v in self.variables: device = self.tools.device(v) if device is not None: break i = len(self.symbolic_variables) # The first few labels are already taken... # So let's loop over our tensors, and give them labels: for v in self.variables: idv = id(v) if type(v) == list: v = self.tools.array(v, self._dtype, device) # Replace "Var(idv," by "Var(i," and increment 'i': tag = "Var({},".format(idv) if tag in self.formula + self.formula2: self.formula = self.formula.replace(tag, "Var({},".format(i)) self.formula2 = self.formula2.replace(tag, "Var({},".format(i)) # Detect if v is meant to be used as a variable or as a parameter: str_cat_v = re.search( r"Var\({},\d+,([012])\)".format(i), self.formula + self.formula2 ).group(1) is_variable = 1 if str_cat_v in ("0", "1") else 0 dims_to_pad = self.nbatchdims + 1 + is_variable - len(v.shape) padded_v = self.tools.view(v, (1,) * dims_to_pad + v.shape) newvars += (padded_v,) if ( hasattr(self, "rec_multVar_highdim") and self.rec_multVar_highdim == idv ): self.rec_multVar_highdim = i i += 1 # "VarSymb(..)" appear when users rely on the "LazyTensor(Ind,Dim,Cat)" syntax, # for the sake of disambiguation: self.formula = self.formula.replace( "VarSymb(", "Var(" ) # We can now replace them with self.formula2 = self.formula2.replace( "VarSymb(", "Var(" ) # actual "Var" symbols if self.formula2 == "": self.formula2 = None # The pre-processing step is now over self.variables = newvars def separate_kwargs(self, kwargs): # separating keyword arguments for Genred init vs Genred call... 
# Currently the only four additional optional keyword arguments that are passed to Genred init # are accuracy options: dtype_acc, use_double_acc and sum_scheme, # chunk mode option enable_chunks, # and compiler option optional_flags. kwargs_init = [] kwargs_call = [] for key in kwargs: if key in ( "dtype_acc", "use_double_acc", "sum_scheme", "enable_chunks", "optional_flags", ): kwargs_init += [(key, kwargs[key])] else: kwargs_call += [(key, kwargs[key])] kwargs_init = dict(kwargs_init) kwargs_call = dict(kwargs_call) return kwargs_init, kwargs_call def promote(self, other, props, is_complex=False): r""" Creates a new :class:`LazyTensor` whose **None** properties are set to those of **self** or **other**. """ res = self.lt_constructor(is_complex=is_complex) for prop in props: y, x = getattr(self, prop), getattr(other, prop) if x is not None: if y is not None: if prop == "ranges": x_eq_y = all( tuple( map(lambda x, y: self.tools.eq(x, y).all().item(), x, y) ) ) else: x_eq_y = x == y if not (x_eq_y): raise ValueError( "Incompatible values for attribute {}: {} and {}.".format( prop, x, y ) ) setattr(res, prop, x) else: setattr(res, prop, y) return res def init(self, is_complex=False): r""" Creates a copy of a :class:`LazyTensor`, without **formula** attribute. """ res = self.lt_constructor(is_complex=is_complex) res.tools = self.tools res._dtype = self._dtype res.Genred = self.Genred res.KernelSolve = self.KernelSolve res.batchdims = self.batchdims res.ni = self.ni res.nj = self.nj res.ranges = self.ranges res.backend = self.backend res.variables = self.variables res.symbolic_variables = self.symbolic_variables return res def join(self, other, is_complex=False): r""" Merges the variables and attributes of two :class:`LazyTensor`, with a compatibility check. This method concatenates tuples of variables, without paying attention to repetitions. """ res = self.promote( other, ( "_dtype", "tools", "Genred", "KernelSolve", "ni", "nj", "ranges", "backend", ), is_complex=is_complex, ) res.symbolic_variables = self.symbolic_variables + other.symbolic_variables # Checks on the batch dimensions - we support broadcasting: res.batchdims = check_broadcasting(self.batchdims, other.batchdims) # N.B.: If needed, variables will be padded with "dummy 1's" just before the Genred call, in self/res.fixvariables(): res.variables = self.variables + other.variables return res # Prototypes for unary and binary operations ============================== def unary( self, operation, dimres=None, opt_arg=None, opt_arg2=None, is_complex=None ): r""" Symbolically applies **operation** to **self**, with optional arguments if needed. The optional argument **dimres** may be used to specify the dimension of the output **result**. """ if is_complex is None: is_complex = self.is_complex # we must prevent any operation if self is the output of a reduction operation, # i.e. if it has a reduction_op field if hasattr(self, "reduction_op"): raise ValueError( "Input is a 'reduced' LazyTensor, no operation can be applied to it. 
" ) if not dimres: dimres = self.ndim res = self.init(is_complex) # Copy of self, without a formula if opt_arg2 is not None: res.formula = "{}({},{},{})".format( operation, self.formula, opt_arg, opt_arg2 ) elif opt_arg is not None: res.formula = "{}({},{})".format(operation, self.formula, opt_arg) else: res.formula = "{}({})".format(operation, self.formula) res.ndim = dimres return res def binary( self, other, operation, is_operator=False, dimres=None, dimcheck="sameor1", opt_arg=None, opt_pos="last", rversion=False, is_complex=None, ): r"""Symbolically applies **operation** to **self**, with optional arguments if needed. Keyword args: - dimres (int): May be used to specify the dimension of the output **result**. - is_operator (bool, default=False): May be used to specify if **operation** is an operator like ``+``, ``-`` or a "genuine" function. - dimcheck (string): shall we check the input dimensions? Supported values are ``"same"``, ``"sameor1"``, or **None**. - rversion (Boolean): shall we invert lhs and rhs of the binary op, e.g. as in __radd__, __rmut__, etc... """ # If needed, convert float numbers / lists / arrays / tensors to LazyTensors: if not hasattr(other, "__GenericLazyTensor__"): other = self.lt_constructor(other) if is_complex is None: is_complex = True if (self.is_complex or other.is_complex) else False # we must prevent any operation if self or other is the output of a reduction operation, # i.e. if it has a reduction_op field if hasattr(self, "reduction_op") or hasattr(other, "reduction_op"): raise ValueError( "One of the inputs is a 'reduced' LazyTensor, no operation can be applied to it. " ) # By default, the dimension of the output variable is the max of the two operands: if not dimres: dimres = max(self.ndim, other.ndim) if dimcheck == "same": if self.ndim != other.ndim: raise ValueError( "Operation {} expects inputs of the same dimension. ".format( operation ) + "Received {} and {}.".format(self.ndim, other.ndim) ) elif dimcheck == "sameor1": if self.ndim != other.ndim and self.ndim != 1 and other.ndim != 1: raise ValueError( "Operation {} expects inputs of the same dimension or dimension 1. ".format( operation ) + "Received {} and {}.".format(self.ndim, other.ndim) ) elif dimcheck != None: raise ValueError("incorrect dimcheck keyword in binary operation") res = self.join( other, is_complex=is_complex ) # Merge the attributes and variables of both operands res.ndim = dimres if not rversion: lformula, rformula = self.formula, other.formula else: rformula, lformula = self.formula, other.formula if is_operator: res.formula = "({} {} {})".format(lformula, operation, rformula) elif opt_arg is not None: if hasattr(opt_arg, "__GenericLazyTensor__"): opt_arg = opt_arg.formula if opt_pos == "last": res.formula = "{}({}, {}, {})".format( operation, lformula, rformula, opt_arg ) elif opt_pos == "middle": res.formula = "{}({}, {}, {})".format( operation, lformula, opt_arg, rformula ) else: res.formula = "{}({}, {})".format(operation, lformula, rformula) # special case of multiplication with a variable V : we define a special tag to enable factorization in case # the user requires a sum reduction over the opposite index (or any index if V is a parameter): # for example sum_i V_j k(x_i,y_j) = V_j sum_i k(x_i,y_j), so we will use KeOps reduction for the kernel # k(x_i,y_j) only, then multiply the result with V. 
if operation == "*" and other.formula[:3] == "Var" and other.ndim > 100: res.rec_multVar_highdim = (self, other) return res def ternary( self, other1, other2, operation, dimres=None, dimcheck="sameor1", opt_arg=None ): r"""Symbolically applies **operation** to **self**, with optional arguments if needed. Keyword args: - dimres (int): May be used to specify the dimension of the output **result**. - is_operator (bool, default=False): May be used to specify if **operation** is an operator like ``+``, ``-`` or a "genuine" function. - dimcheck (string): shall we check the input dimensions? Supported values are ``"same"``, ``"sameor1"``, or **None**. """ # If needed, convert float numbers / lists / arrays / tensors to LazyTensors: if not hasattr(other1, "__GenericLazyTensor__"): other1 = self.lt_constructor(other1) if not hasattr(other2, "__GenericLazyTensor__"): other2 = self.lt_constructor(other2) # we must prevent any operation if self, other1 or other2 is the output of a reduction operation, # i.e. if it has a reduction_op field if ( hasattr(self, "reduction_op") or hasattr(other1, "reduction_op") or hasattr(other2, "reduction_op") ): raise ValueError( "One of the inputs is a 'reduced' LazyTensor, no operation can be applied to it. " ) # By default, the dimension of the output variable is the max of the three operands: if not dimres: dimres = max(self.ndim, other1.ndim, other2.ndim) if dimcheck == "same": if (self.ndim != other1.ndim) or (self.ndim != other2.ndim): raise ValueError( "Operation {} expects inputs of the same dimension. ".format( operation ) + "Received {}, {} and {}.".format( self.ndim, other1.ndim, other2.ndim ) ) elif dimcheck == "sameor1": if not same_or_one_test(self.ndim, other1.ndim, other2.ndim): raise ValueError( "Operation {} expects inputs of the same dimension or dimension 1. ".format( operation ) + "Received {}, {} and {}.".format( self.ndim, other1.ndim, other2.ndim ) ) elif dimcheck != None: raise ValueError("incorrect dimcheck keyword in binary operation") res = self.join( other1.join(other2) ) # Merge the attributes and variables of operands res.ndim = dimres if opt_arg is not None: if hasattr(opt_arg, "__GenericLazyTensor__"): opt_arg = opt_arg.formula res.formula = "{}({}, {}, {}, {})".format( operation, self.formula, other1.formula, other2.formula, opt_arg ) else: res.formula = "{}({}, {}, {})".format( operation, self.formula, other1.formula, other2.formula ) return res # Prototypes for reduction operations ===================================== def reduction( self, reduction_op, other=None, opt_arg=None, axis=None, dim=None, call=True, is_complex=None, **kwargs ): r""" Applies a reduction to a :class:`LazyTensor`. This method is used internally by the LazyTensor class. Args: reduction_op (string): the string identifier of the reduction, which will be passed to the KeOps routines. Keyword Args: other: May be used to specify some **weights** ; depends on the reduction. opt_arg: typically, some integer needed by ArgKMin reductions ; depends on the reduction. axis (integer): The axis with respect to which the reduction should be performed. Supported values are **nbatchdims** and **nbatchdims + 1**, where **nbatchdims** is the number of "batch" dimensions before the last three (:math:`i` indices, :math:`j` indices, variables' dimensions). dim (integer): alternative keyword for the **axis** argument. call (True or False): Should we actually perform the reduction on the current variables? If **True**, the returned object will be a NumPy array or a PyTorch tensor. 
Otherwise, we simply return a callable :class:`LazyTensor` that may be used as a :mod:`pykeops.numpy.Genred` or :mod:`pykeops.torch.Genred` function on arbitrary tensor data. backend (string): Specifies the map-reduce scheme, as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of IntTensors, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module. If **None** (default), we simply use a **dense Kernel matrix** as we loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64".. use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. This option may be changed only when reduction_op is one of: "Sum", "MaxSumShiftExp", "LogSumExp", "Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight". Default value "auto" will set this option to "block_red" for these reductions. Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. out (2d NumPy array or PyTorch Tensor, None by default): The output numerical array, for in-place computation. If provided, the output array should all have the same ``dtype``, be **contiguous** and be stored on the **same device** as the arguments. Moreover it should have the correct shape for the output. """ if is_complex is None: if other is None: is_complex = self.is_complex else: is_complex = self.is_complex or other.is_complex if axis is None: axis = dim # NumPy uses axis, PyTorch uses dim... if axis - self.nbatchdims not in (0, 1): raise ValueError( "Reductions must be called with 'axis' (or 'dim') equal to the number of batch dimensions + 0 or 1." 
) if other is None: res = self.init(is_complex=is_complex) # ~ self.copy() res.formula2 = None else: res = self.join(other, is_complex=is_complex) res.formula2 = other.formula res.formula = self.formula res.reduction_op = reduction_op res.axis = axis - self.nbatchdims res.opt_arg = opt_arg kwargs_init, kwargs_call = self.separate_kwargs(kwargs) res.kwargs = kwargs_call res.ndim = self.ndim if reduction_op == "Sum" and hasattr(self, "rec_multVar_highdim"): if res.axis != self.rec_multVar_highdim[1].axis: return ( self.rec_multVar_highdim[0].sum(axis=axis) * self.rec_multVar_highdim[1].variables[0] ) res.rec_multVar_highdim = id(self.rec_multVar_highdim[1].variables[0]) else: res.rec_multVar_highdim = None if res._dtype is not None: res.fixvariables() # Turn the "id(x)" numbers into consecutive labels # "res" now becomes a callable object: res.callfun = res.Genred( res.formula, [], reduction_op=res.reduction_op, axis=res.axis, opt_arg=res.opt_arg, formula2=res.formula2, **kwargs_init, rec_multVar_highdim=res.rec_multVar_highdim ) if call and len(res.symbolic_variables) == 0 and res._dtype is not None: return res() else: return res def solve(self, other, var=None, call=True, **kwargs): r""" Solves a positive definite linear system of the form ``sum(self) = other`` or ``sum(self*var) = other`` , using a conjugate gradient solver. Args: self (:class:`LazyTensor`): KeOps variable that encodes a symmetric positive definite matrix / linear operator. other (:class:`LazyTensor`): KeOps variable that encodes the second member of the equation. Keyword args: var (:class:`LazyTensor`): If **var** is **None**, **solve** will return the solution of the ``self * var = other`` equation. Otherwise, if **var** is a KeOps symbolic variable, **solve** will assume that **self** defines an expression that is linear with respect to **var** and solve the equation ``self(var) = other`` with respect to **var**. alpha (float, default=1e-10): Non-negative **ridge regularization** parameter. call (bool): If **True** and if no other symbolic variable than **var** is contained in **self**, **solve** will return a tensor solution of our linear system. Otherwise **solve** will return a callable :class:`LazyTensor`. backend (string): Specifies the map-reduce scheme, as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module. device_id (int, default=-1): Specifies the GPU that should be used to perform the computation; a negative value lets your system choose the default GPU. This parameter is only useful if your system has access to several GPUs. ranges (6-uple of IntTensors, None by default): Ranges of integers that specify a :doc:`block-sparse reduction scheme <../../sparsity>` as detailed in the documentation of the :mod:`Genred <pykeops.torch.Genred>` module. If **None** (default), we simply use a **dense Kernel matrix** as we loop over all indices :math:`i\in[0,M)` and :math:`j\in[0,N)`. dtype_acc (string, default ``"auto"``): type for accumulator of reduction, before casting to dtype. It improves the accuracy of results in case of large sized data, but is slower. Default value "auto" will set this option to the value of dtype. The supported values are: - **dtype_acc** = ``"float16"`` : allowed only if dtype is "float16". - **dtype_acc** = ``"float32"`` : allowed only if dtype is "float16" or "float32". - **dtype_acc** = ``"float64"`` : allowed only if dtype is "float32" or "float64".. 
use_double_acc (bool, default False): same as setting dtype_acc="float64" (only one of the two options can be set) If True, accumulate results of reduction in float64 variables, before casting to float32. This can only be set to True when data is in float32 or float64. It improves the accuracy of results in case of large sized data, but is slower. sum_scheme (string, default ``"auto"``): method used to sum up results for reductions. This option may be changed only when reduction_op is one of: "Sum", "MaxSumShiftExp", "LogSumExp", "Max_SumShiftExpWeight", "LogSumExpWeight", "SumSoftMaxWeight". Default value "auto" will set this option to "block_red" for these reductions. Possible values are: - **sum_scheme** = ``"direct_sum"``: direct summation - **sum_scheme** = ``"block_sum"``: use an intermediate accumulator in each block before accumulating in the output. This improves accuracy for large sized data. - **sum_scheme** = ``"kahan_scheme"``: use Kahan summation algorithm to compensate for round-off errors. This improves accuracy for large sized data. enable_chunks (bool, default True): enable automatic selection of special "chunked" computation mode for accelerating reductions with formulas involving large dimension variables. .. warning:: Please note that **no check** of symmetry and definiteness will be performed prior to our conjugate gradient descent. """ if not hasattr(other, "__GenericLazyTensor__"): other = self.lt_constructor( x=other, axis=0 ) # a vector is normally indexed by "i" # If given, var is symbolic variable corresponding to unknown # other must be a variable equal to the second member of the linear system, # and it may be symbolic. If it is symbolic, its index should match the index of var # if other is not symbolic, all variables in self must be non symbolic if len(other.symbolic_variables) == 0 and len(self.symbolic_variables) != 0: raise ValueError("If 'self' has symbolic variables, so should 'other'.") # we infer axis of reduction as the opposite of the axis of output axis = 1 - other.axis if var is None: # this is the classical mode: we want to invert sum(self*var) = other # we define var as a new symbolic variable with same dimension as other # and we assume axis of var is same as axis of reduction varindex = len(self.symbolic_variables) var = self.lt_constructor((varindex, other.ndim, axis)) res = self * var else: # var is given and must be a symbolic variable which is already inside self varindex = var.symbolic_variables[0][0] res = self.init() res.formula = self.formula res.formula2 = None res.reduction_op = "Solve" res.varindex = varindex res.varformula = var.formula.replace("VarSymb", "Var") res.other = other res.axis = axis kwargs_init, res.kwargs = self.separate_kwargs(kwargs) res.ndim = self.ndim if other.ndim > 100: res.rec_multVar_highdim = varindex else: res.rec_multVar_highdim = None if res._dtype is not None: res.fixvariables() res.callfun = res.KernelSolve( res.formula, [], res.varformula, res.axis, **kwargs_init, rec_multVar_highdim=res.rec_multVar_highdim ) # we call if call=True, if other is not symbolic, and if the dtype is set if call and len(other.symbolic_variables) == 0 and res._dtype is not None: return res() else: return res def __call__(self, *args, **kwargs): r""" Executes a :mod:`Genred <pykeops.torch.Genred>` or :mod:`KernelSolve <pykeops.torch.KernelSolve>` call on the input data, as specified by **self.formula** . 
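        Example (an illustrative sketch: it assumes the concrete ``pykeops.torch.LazyTensor``
        frontend and arbitrary example shapes):

            >>> import torch
            >>> from pykeops.torch import LazyTensor
            >>> x, y = torch.randn(1000, 3), torch.randn(2000, 3)
            >>> x_i, y_j = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])
            >>> K = (- ((x_i - y_j) ** 2).sum(-1)).exp()    # Symbolic Gaussian kernel matrix
            >>> red = K.sum_reduction(axis=1, call=False)   # Callable LazyTensor, nothing computed yet
            >>> out = red()                                 # Triggers the actual Genred call
            >>> print(out.shape)
            ... torch.Size([1000, 1])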
""" if not hasattr(self, "reduction_op"): raise ValueError( "A LazyTensor object may be called only if it corresponds to the output of a reduction operation or solve operation." ) self.kwargs.update(kwargs) if self.ranges is not None and "ranges" not in self.kwargs: self.kwargs.update({"ranges": self.ranges}) if self.backend is not None and "backend" not in self.kwargs: self.kwargs.update({"backend": self.backend}) if ( self._dtype is None ): # This can only happen if we haven't encountered 2D or 3D arrays just yet... self.get_tools() self._dtype = self.tools.dtypename( self.tools.dtype(args[0]) ) # crash if LazyTensor is called self.fixvariables() kwargs_init, self.kwargs = self.separate_kwargs(self.kwargs) if self.reduction_op == "Solve": self.callfun = self.KernelSolve( self.formula, [], self.formula2, self.axis, self._dtype, **kwargs_init, rec_multVar_highdim=self.rec_multVar_highdim ) else: self.callfun = self.Genred( self.formula, [], self.reduction_op, self.axis, self._dtype, self.opt_arg, self.formula2, **kwargs_init, rec_multVar_highdim=self.rec_multVar_highdim ) if self.reduction_op == "Solve" and len(self.other.symbolic_variables) == 0: # here args should be empty, according to our rule if args != (): raise ValueError("no input required") # we replace by other args = (self.other.variables[0],) return self.callfun(*args, *self.variables, **self.kwargs) def __str__(self): r""" Returns a verbose string identifier. """ tmp = self.init(is_complex=self.is_complex) # ~ self.copy() tmp.formula = self.formula tmp.formula2 = None if not hasattr(self, "formula2") else self.formula2 tmp.fixvariables() # Replace Var(id(x),...) with consecutive labels string = "KeOps LazyTensor\n formula: {}".format(tmp.formula) if len(self.symbolic_variables) > 0: string += "\n symbolic variables: Var{}".format( self.symbolic_variables[0] ) for var in self.symbolic_variables[1:]: string += ", Var{}".format(var) string += "\n shape: {}".format(self.shape) if hasattr(self, "reduction_op"): string += "\n reduction: {} (axis={})".format( self.reduction_op, self.axis ) if tmp.formula2 is not None: string += "\n formula2: {}".format(tmp.formula2) if hasattr(self, "opt_arg") and self.opt_arg is not None: string += "\n opt_arg: {}".format(self.opt_arg) return string @property def dtype(self): return self._dtype @property def _shape(self): r"""Returns the internal shape of the LazyTensor.""" btch = () if self.batchdims is None else self.batchdims ni = 1 if self.ni is None else self.ni nj = 1 if self.nj is None else self.nj ndim = 1 if self.ndim is None else self.ndim return btch + (ni, nj, ndim) @property def shape(self): r"""Returns the shape of the LazyTensor""" s = self._shape if s[-1] == 1: return s[:-1] else: return s def dim(self): r""" Just as in PyTorch, returns the number of dimensions of a :class:`LazyTensor`. """ return len(self._shape) @property def nbatchdims(self): return 0 if self.batchdims is None else len(self.batchdims) # List of supported operations ============================================ # N.B.: This flag prevents NumPy (and also PyTorch ?) from overriding # the KeOps implementations of __radd__, __rdiv___, etc. written below. 
# For instance, if x is a NumPy array and y is a KeOps LazyTensor, # writing "x+y" will call y.__radd__(x) (LazyTensor method) instead # of x.__add__(y) (NumPy method) __array_ufunc__ = None # Arithmetics -------------------------------------------------------------- def addop(self, other, **kwargs): return self.binary(other, "+", is_operator=True, **kwargs) def __add__(self, other): r""" Broadcasted addition operator - a binary operation. ``x + y`` returns a :class:`LazyTensor` that encodes, symbolically, the addition of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return self elif is_complex_lazytensor(other) and not is_complex_lazytensor(self): return self.real2complex().addop(other) else: return self.addop(other) def __radd__(self, other): r""" Broadcasted addition operator - a binary operation. ``x + y`` returns a :class:`LazyTensor` that encodes, symbolically, the addition of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return self else: return self.addop(other, rversion=True) def subop(self, other, **kwargs): return self.binary(other, "-", is_operator=True, **kwargs) def __sub__(self, other): r""" Broadcasted subtraction operator - a binary operation. ``x - y`` returns a :class:`LazyTensor` that encodes, symbolically, the subtraction of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return self elif is_complex_lazytensor(other) and not is_complex_lazytensor(self): return self.real2complex().subop(other) else: return self.subop(other) def __rsub__(self, other): r""" Broadcasted subtraction operator - a binary operation. ``x - y`` returns a :class:`LazyTensor` that encodes, symbolically, the subtraction of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return self.unary("Minus") else: return self.subop(other, rversion=True) def mulop(self, other, **kwargs): return self.binary(other, "*", is_operator=True, **kwargs) def __mul__(self, other): r""" Broadcasted element-wise product - a binary operation. ``x * y`` returns a :class:`LazyTensor` that encodes, symbolically, the elementwise product of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return 0 elif is_scalar_and_equals(other, 1): return self elif is_scalar_and_equals(other, -1): return self.unary("Minus") elif is_complex_lazytensor(other) and not is_complex_lazytensor(self): return other.mulop(self) elif self.tools.detect_complex(other) and not is_complex_lazytensor(self): return self.lt_constructor(other).mulop(self) else: return self.mulop(other) def __rmul__(self, other): r""" Broadcasted element-wise product - a binary operation. ``x * y`` returns a :class:`LazyTensor` that encodes, symbolically, the elementwise product of ``x`` and ``y``. """ if is_scalar_and_equals(other, 0): return 0 elif is_scalar_and_equals(other, 1): return self elif is_scalar_and_equals(other, -1): return self.unary("Minus") elif self.tools.detect_complex(other) and not is_complex_lazytensor(self): return self.real2complex().mulop(self.lt_constructor(other)) else: return self.mulop(other, rversion=True) def divop(self, other, **kwargs): return self.binary(other, "/", is_operator=True, **kwargs) def __truediv__(self, other): r""" Broadcasted element-wise division - a binary operation. ``x / y`` returns a :class:`LazyTensor` that encodes, symbolically, the elementwise division of ``x`` by ``y``. 
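        Example (illustrative sketch, assuming the ``pykeops.torch.LazyTensor`` frontend and
        arbitrary shapes):

            >>> x_i = LazyTensor(torch.rand(100, 1, 3))
            >>> y_j = LazyTensor(torch.rand(1, 200, 3))
            >>> q_ij = x_i / (1 + y_j ** 2)    # Symbolic (100, 200, 3) element-wise quotient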
""" if is_scalar_and_equals(other, 1): return self elif is_complex_lazytensor(other) and not is_complex_lazytensor(self): return self.real2complex().divop(other) else: return self.divop(other) def __rtruediv__(self, other): r""" Broadcasted element-wise division - a binary operation. ``x / y`` returns a :class:`LazyTensor` that encodes, symbolically, the elementwise division of ``x`` by ``y``. """ if is_scalar_and_equals(other, 0): return 0 elif is_scalar_and_equals(other, 1): return self.unary("Inv") else: return self.divop(other, rversion=True) def __or__(self, other): r""" Euclidean scalar product - a binary operation. ``(x|y)`` returns a :class:`LazyTensor` that encodes, symbolically, the scalar product of ``x`` and ``y`` which are assumed to have the same shape. """ return self.binary(other, "|", is_operator=True, dimres=1, dimcheck="same") def __ror__(self, other): r""" Euclidean scalar product - a binary operation. ``(x|y)`` returns a :class:`LazyTensor` that encodes, symbolically, the scalar product of ``x`` and ``y`` which are assumed to have the same shape. """ return self.binary( other, "|", is_operator=True, dimres=1, dimcheck="same", rversion=True ) # Unary arithmetics -------------------------------------------------------- def __abs__(self): r""" Element-wise absolute value - a unary operation. ``abs(x)`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise absolute value of ``x``. """ return self.unary("Abs") def abs(self): r""" Element-wise absolute value - a unary operation. ``x.abs()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise absolute value of ``x``. """ return abs(self) def __neg__(self): r""" Element-wise minus - a unary operation. ``-x`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise opposite of ``x``. """ return self.unary("Minus") # Simple functions --------------------------------------------------------- def exp(self): r""" Element-wise exponential - a unary operation. ``x.exp()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise exponential of ``x``. """ return self.unary("Exp") def log(self): r""" Element-wise logarithm - a unary operation. ``x.log()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise logarithm of ``x``. """ return self.unary("Log") def xlogx(self): r""" Element-wise x*log(x) function - a unary operation. ``x.xlogx()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise ``x`` times logarithm of ``x`` (with value 0 at 0). """ return self.unary("XLogX") def cos(self): r""" Element-wise cosine - a unary operation. ``x.cos()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise cosine of ``x``. """ return self.unary("Cos") def sin(self): r""" Element-wise sine - a unary operation. ``x.sin()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise sine of ``x``. """ return self.unary("Sin") def sinxdivx(self): r""" Element-wise sin(x)/x function - a unary operation. ``x.sinxdivx()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise sinxdivx function of ``x``. """ return self.unary("SinXDivX") def sinc(self): r""" Element-wise sinc(x) = sin(pi x) / (pi x) function - a unary operation. ``x.sinc()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise sinc function of ``x``. """ return (math.pi * self).sinxdivx() def asin(self): r""" Element-wise arcsine - a unary operation. 
``x.asin()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise arcsine of ``x``. """ return self.unary("Asin") def acos(self): r""" Element-wise arccosine - a unary operation. ``x.acos()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise arccosine of ``x``. """ return self.unary("Acos") def atan(self): r""" Element-wise arctangent - a unary operation. ``x.atan()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise arctangent of ``x``. """ return self.unary("Atan") def atan2(self, other): r""" Element-wise atan2 - a binary operation. ``y.atan2(x)`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise atan2 of ``x`` and ``y``. """ return self.binary(other, "Atan2", dimcheck="sameor1") def sqrt(self): r""" Element-wise square root - a unary operation. ``x.sqrt()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise square root of ``x``. """ return self.unary("Sqrt") def rsqrt(self): r""" Element-wise inverse square root - a unary operation. ``x.rsqrt()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise inverse square root of ``x``. """ return self.unary("Rsqrt") def __pow__(self, other): r""" Broadcasted element-wise power operator - a binary operation. ``x**y`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise value of ``x`` to the power ``y``. Note: - if **y = 2**, ``x**y`` relies on the ``"Square"`` KeOps operation; - if **y = 0.5**, ``x**y`` uses on the ``"Sqrt"`` KeOps operation; - if **y = -0.5**, ``x**y`` uses on the ``"Rsqrt"`` KeOps operation. """ if type(other) == int: return ( self.unary("Square") if other == 2 else self.unary("Pow", opt_arg=other) ) elif type(other) == float: if other == 0.5: return self.unary("Sqrt") elif other == -0.5: return self.unary("Rsqrt") else: other = self.lt_constructor(other) if hasattr(other, "__GenericLazyTensor__"): if other.ndim == 1 or other.ndim == self.ndim: return self.binary(other, "Powf", dimcheck=None) else: raise ValueError( "Incompatible dimensions for the LazyTensor and its exponent: " + "{} and {}.".format(self.ndim, other.ndim) ) else: raise ValueError( "The exponent should be an integer, a float number or a LazyTensor." ) def power(self, other): r""" Broadcasted element-wise power operator - a binary operation. ``pow(x,y)`` is equivalent to ``x**y``. """ return self**other def square(self): r""" Element-wise square - a unary operation. ``x.square()`` is equivalent to ``x**2`` and returns a :class:`LazyTensor` that encodes, symbolically, the element-wise square of ``x``. """ return self.unary("Square") def sign(self): r""" Element-wise sign in {-1,0,+1} - a unary operation. ``x.sign()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise sign of ``x``. """ return self.unary("Sign") def step(self): r""" Element-wise step function - a unary operation. ``x.step()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise sign of ``x``. """ return self.unary("Step") def relu(self): r""" Element-wise ReLU function - a unary operation. ``x.relu()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise positive part of ``x``. """ return self.unary("ReLU") def clamp(self, other1, other2): r""" Element-wise Clamp function - a ternary operation. ``x.clamp(a,b)`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise clamping of ``x`` in ``(a,b)``. Broadcasting rules apply. 
a and b may be fixed integers or floats, or other LazyTensors. """ if (type(other1) == int) and (type(other2) == int): return self.unary("ClampInt", opt_arg=other1, opt_arg2=other2) else: return self.ternary(other1, other2, "Clamp", dimcheck="sameor1") def ifelse(self, other1, other2): r""" Element-wise if-else function - a ternary operation. ``x.ifelse(a,b)`` returns a :class:`LazyTensor` that encodes, symbolically, ``a`` where ``x >= 0`` and ``b`` where ``x < 0``. Broadcasting rules apply. a and b may be fixed integers or floats, or other LazyTensors. """ return self.ternary(other1, other2, "IfElse", dimcheck="sameor1") def mod(self, modulus, offset=0): r""" Element-wise modulo with offset function - a ternary operation. ``x.mod(a,b)`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise modulo of ``x`` with modulus ``a`` and offset ``b``. By default b=0, so that x.mod(a) becomes equivalent to the NumPy function mod. Broadcasting rules apply. a and b are fixed integers or float. """ return self.ternary(modulus, offset, "Mod", dimcheck="sameor1") def round(self, other=0): r""" Element-wise rounding function - a unary operation. ``x.round(d)`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise rounding of ``x`` to d decimal places. d is int. """ return self.unary("Round", opt_arg=other) def sqnorm2(self): r""" Squared Euclidean norm - a unary operation. ``x.sqnorm2()`` returns a :class:`LazyTensor` that encodes, symbolically, the squared Euclidean norm of a vector ``x``. """ return self.unary("SqNorm2", dimres=1) def norm2(self): r""" Euclidean norm - a unary operation. ``x.norm2()`` returns a :class:`LazyTensor` that encodes, symbolically, the Euclidean norm of a vector ``x``. """ return self.unary("Norm2", dimres=1) def norm(self, dim): r""" Euclidean norm - a unary operation. ``x.norm(-1)`` is equivalent to ``x.norm2()`` and returns a :class:`LazyTensor` that encodes, symbolically, the Euclidean norm of a vector ``x``. """ if dim not in [-1, len(self._shape) - 1]: raise ValueError("KeOps only supports norms over the last dimension.") return self.norm2() def normalize(self): r""" Vector normalization - a unary operation. ``x.normalize()`` returns a :class:`LazyTensor` that encodes, symbolically, a vector ``x`` divided by its Euclidean norm. """ return self.unary("Normalize") def sqdist(self, other): r""" Squared distance - a binary operation. ``x.sqdist(y)`` returns a :class:`LazyTensor` that encodes, symbolically, the squared Euclidean distance between two vectors ``x`` and ``y``. """ return self.binary(other, "SqDist", dimres=1) def weightedsqnorm(self, other): r""" Weighted squared norm of a LazyTensor ``x`` - a binary operation. ``x.weightedsqnorm(s)`` returns a :class:`LazyTensor` that encodes, symbolically, the weighted squared Norm of a vector ``x`` with weights stored in the LazyTensor ``s``- see the :doc:`main reference page <../../../api/math-operations>` for details. """ if not hasattr(other, "__GenericLazyTensor__"): other = self.lt_constructor(other) if other.ndim not in (1, self.ndim, self.ndim**2): raise ValueError( "Squared norm weights should be of size 1 (scalar), " + "D (diagonal) or D^2 (full symmetric tensor), but received " + "{} with D={}.".format(other.ndim, self.ndim) ) return self.binary( other, "WeightedSqNorm", dimres=1, dimcheck=None, rversion=True ) def weightedsqdist(self, g, s): r""" Weighted squared distance. ``x.weightedsqdist(y, s)`` is equivalent to ``(x - y).weightedsqnorm(s)``. 
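        Example (illustrative sketch, assuming the ``pykeops.torch.LazyTensor`` frontend;
        the diagonal weights below are arbitrary):

            >>> x_i = LazyTensor(torch.randn(1000, 1, 3))
            >>> y_j = LazyTensor(torch.randn(1, 2000, 3))
            >>> D_ij = x_i.weightedsqdist(y_j, [0.5, 1.0, 2.0])  # Symbolic (1000, 2000) weighted squared distances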
""" if not hasattr(g, "__GenericLazyTensor__"): g = self.lt_constructor(g) if not hasattr(s, "__GenericLazyTensor__"): s = self.lt_constructor(s) return (self - g).weightedsqnorm(s) def elem(self, i): r""" Indexing of a vector - a unary operation. ``x.elem(i)`` returns a :class:`LazyTensor` that encodes, symbolically, the i-th element ``x[i]`` of the vector ``x``. """ if type(i) is not int: raise ValueError("Elem indexing is only supported for integer indices.") if i < 0 or i >= self.ndim: raise ValueError( "Index i={} is out of bounds [0,D) = [0,{}).".format(i, self.ndim) ) return self.unary("Elem", dimres=1, opt_arg=i) def extract(self, i, d): r""" Range indexing - a unary operation. ``x.extract(i, d)`` returns a :class:`LazyTensor` that encodes, symbolically, the sub-vector ``x[i:i+d]`` of the vector ``x``. """ if (type(i) is not int) or (type(d) is not int): raise ValueError("Indexing is only supported for integer indices.") if i < 0 or i >= self.ndim: raise ValueError("Starting index is out of bounds.") if d < 1 or i + d > self.ndim: raise ValueError("Slice dimension is out of bounds.") return self.unary("Extract", dimres=d, opt_arg=i, opt_arg2=d) def __getitem__(self, key): r""" Element or range indexing - a unary operation. ``x[key]`` redirects to the :meth:`elem` or :meth:`extract` methods, depending on the ``key`` argument. Supported values are: - an integer ``k``, in which case ``x[key]`` redirects to ``elem(x,k)``, - a tuple ``..,:,:,k`` with ``k`` an integer, which is equivalent to the case above, - a slice of the form ``k:l``, ``k:`` or ``:l``, with ``k`` and ``l`` two integers, in which case ``x[key]`` redirects to ``extract(x,k,l-k)``, - a tuple of slices of the form ``..,:,:,k:l``, ``..,:,:,k:`` or ``..,:,:,:l``, with ``k`` and ``l`` two integers, which are equivalent to the case above. """ # we allow only these forms: # [..,:,:,k], [..,:,:,k:l], [..,:,:,k:], [..,:,:,:l] # or equivalent [k], [k:l], [k:], [:l] if isinstance(key, tuple): if len(key) == len(self._shape) and key[:-1] == (slice(None),) * ( len(self._shape) - 1 ): key = key[-1] else: raise ValueError( "LazyTensors only support indexing with respect to their last dimension." ) if isinstance(key, slice): if not key.step in [None, 1]: raise ValueError( "LazyTensors do not support sliced indexing with stepsizes > 1." ) if key.start is None: key = slice(0, key.stop) if key.stop is None: key = slice(key.start, self.ndim) return self.extract(key.start, key.stop - key.start) elif isinstance(key, int): return self.elem(key) else: raise ValueError( "LazyTensors only support indexing with integers and vanilla python slices." ) def one_hot(self, D): r""" Encodes a (rounded) scalar value as a one-hot vector of dimension D. ``x.one_hot(D)`` returns a :class:`LazyTensor` that encodes, symbolically, a vector of length D whose round(x)-th coordinate is equal to 1, and the other ones to zero. """ if type(D) is not int: raise ValueError( "One-hot encoding expects an integer dimension of the output vector." ) if self.ndim != 1: raise ValueError("One-hot encoding is only supported for scalar formulas.") return self.unary("OneHot", dimres=D, opt_arg=D) def concat(self, other): r""" Concatenation of two :class:`LazyTensor` - a binary operation. ``x.concat(y)`` returns a :class:`LazyTensor` that encodes, symbolically, the concatenation of ``x`` and ``y`` along their last dimension. 
""" return self.binary( other, "Concat", dimres=(self.ndim + other.ndim), dimcheck=None ) @staticmethod def concatenate(tuple_of_lt, axis=-1): r""" Concatenation of a tuple of :class:`GenericLazyTensor`. ``GenericLazyTensor.concatenate( (x_1, x_2, ..., x_n), -1)`` returns a :class:`GenericLazyTensor` that encodes, symbolically, the concatenation of ``x_1``, ``x_2``, ..., ``x_n`` along their last dimension. Note that **axis** should be equal to -1 or 2 (if the ``x_i``'s are 3D GenericLazyTensor): GenericLazyTensors only support concatenation and indexing operations with respect to the last dimension. """ if isinstance(tuple_of_lt, tuple): if len(tuple_of_lt) == 0: raise ValueError("Received an empty tuple of LazyTensors.") elif hasattr(tuple_of_lt[0], "__GenericLazyTensor__"): if axis not in [-1, len(tuple_of_lt[0]._shape) - 1]: raise ValueError( "LazyTensor only supports concatenation along the last axis." ) if len(tuple_of_lt) == 1: return tuple_of_lt[0] elif len(tuple_of_lt) == 2: return tuple_of_lt[0].concat(tuple_of_lt[1]) else: return GenericLazyTensor.concatenate( (tuple_of_lt[0].concat(tuple_of_lt[1]),) + tuple_of_lt[2:], axis=-1, ) else: raise ValueError( "LazyTensor.concatenate is implemented on *tuples* of LazyTensors." ) @staticmethod def cat(tuple_of_lt, dim): r""" Concatenation of a tuple of LazyTensors. ``LazyTensor.cat( (x_1, x_2, ..., x_n), -1)`` is a PyTorch-friendly alias for ``LazyTensor.concatenate( (x_1, x_2, ..., x_n), -1)``; just like indexing operations, it is only supported along the last dimension. """ return GenericLazyTensor.concatenate(tuple_of_lt, dim) def matvecmult(self, other): r""" Matrix-vector product - a binary operation. If ``x._shape[-1] == A*B`` and ``y._shape[-1] == B``, ``z = x.matvecmult(y)`` returns a :class:`GenericLazyTensor` such that ``z._shape[-1] == A`` which encodes, symbolically, the matrix-vector product of ``x`` and ``y`` along their last dimension. For details, please check the documentation of the KeOps operation ``"MatVecMult"`` in the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( other, "MatVecMult", dimres=(self.ndim // other.ndim), dimcheck=None ) def vecmatmult(self, other): r""" Vector-matrix product - a binary operation. If ``x._shape[-1] == A`` and ``y._shape[-1] == A*B``, ``z = x.vecmatmult(y)`` returns a :class:`GenericLazyTensor` such that ``z._shape[-1] == B`` which encodes, symbolically, the vector-matrix product of ``x`` and ``y`` along their last dimension. For details, please check the documentation of the KeOps operation ``"VecMacMult"`` in the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( other, "VecMatMult", dimres=(other.ndim // self.ndim), dimcheck=None ) def tensorprod(self, other): r""" Tensor product of vectors - a binary operation. If ``x._shape[-1] == A`` and ``y._shape[-1] == B``, ``z = x.tensorprod(y)`` returns a :class:`GenericLazyTensor` such that ``z._shape[-1] == A*B`` which encodes, symbolically, the tensor product of ``x`` and ``y`` along their last dimension. For details, please check the documentation of the KeOps operation ``"TensorProd"`` in the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( other, "TensorProd", dimres=(other.ndim * self.ndim), dimcheck=None ) def keops_tensordot(self, other, dimfa, dimfb, contfa, contfb, *args): """ Tensor dot product (on KeOps internal dimensions) - a binary operation. 
:param other: a LazyTensor :param dimfa: tuple of int :param dimfb: tuple of int :param contfa: tuple of int listing contraction dimension of a (could be empty) :param contfb: tuple of int listing contraction dimension of b (could be empty) :param args: a tuple of int containing the graph of a permutation of the output :return: """ # permute = tuple(range(len(dimfa) + len(dimfb) - 2 * len(contfa))) opt_arg = "" for intseq in (dimfa, dimfb, contfa, contfb) + args: opt_arg += "[" if isinstance(intseq, int): intseq = (intseq,) # convert to tuple for item in intseq: opt_arg += "{},".format(item) opt_arg = opt_arg[:-1] if len(intseq) else opt_arg # to remove last comma opt_arg += "], " opt_arg = opt_arg[:-2] # to remove last comma and space dimres = np.array(dimfa).prod() * np.array(dimfb).prod() dimres /= np.array(dimfa)[np.array(contfa)].prod() ** 2 if len(contfa) else 1 return self.binary( other, "TensorDot", dimres=int(dimres), dimcheck=None, opt_arg=opt_arg ) def grad(self, other, gradin): r""" Symbolic gradient operation. ``z = x.grad(v,e)`` returns a :class:`LazyTensor` which encodes, symbolically, the gradient (more precisely, the adjoint of the differential operator) of ``x``, with respect to variable ``v``, and applied to ``e``. For details, please check the documentation of the KeOps operation ``"Grad"`` in the :doc:`main reference page <../../../api/math-operations>`. """ return self.binary( gradin, "Grad", dimres=other.ndim, dimcheck="same", opt_arg=other, opt_pos="middle", ) # List of supported reductions ============================================ def sum(self, axis=-1, dim=None, **kwargs): r""" Summation unary operation, or Sum reduction. ``sum(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the sum reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the sum reduction of **self** over the "j" indexes. - if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the sum of the values of the vector **self**, Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), 1 (= reduction over :math:`j`) or 2 (i.e. -1, sum along the dimension of the vector variable). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("Sum", dimres=1) else: return self.reduction("Sum", axis=axis, **kwargs) def sum_reduction(self, axis=None, dim=None, **kwargs): r""" Sum reduction. ``sum_reduction(axis, dim, **kwargs)`` will return the sum reduction of **self**. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("Sum", axis=axis, dim=dim, **kwargs) def logsumexp(self, axis=None, dim=None, weight=None, **kwargs): r""" Log-Sum-Exp reduction. ``logsumexp(axis, dim, weight, **kwargs)`` will: - if **axis or dim = 0**, return the "log-sum-exp" reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the "log-sum-exp" reduction of **self** over the "j" indexes. 
For details, please check the documentation of the KeOps reductions ``LogSumExp`` and ``LogSumExpWeight`` in the :doc:`main reference page <../../../api/math-operations>`. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. weight (:class:`LazyTensor`): optional object that specifies scalar or vector-valued weights in the log-sum-exp operation **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ if weight is None: return self.reduction("LogSumExp", axis=axis, dim=dim, **kwargs) else: return self.reduction( "LogSumExp", other=weight, axis=axis, dim=dim, **kwargs ) def logsumexp_reduction(self, **kwargs): r""" Log-Sum-Exp reduction. Redirects to :meth:`logsumexp` method. """ return self.logsumexp(**kwargs) def sumsoftmaxweight(self, weight, axis=None, dim=None, **kwargs): r""" Sum of weighted Soft-Max reduction. ``sumsoftmaxweight(weight, axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the "sum of weighted Soft-Max" reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the "sum of weighted Soft-Max" reduction of **self** over the "j" indexes. For details, please check the documentation of the KeOps reduction ``SumSoftMaxWeight`` in the :doc:`main reference page <../../../api/math-operations>`. Keyword Args: weight (:class:`LazyTensor`): object that specifies scalar or vector-valued weights. axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction( "SumSoftMaxWeight", other=weight, axis=axis, dim=dim, **kwargs ) def sumsoftmaxweight_reduction(self, **kwargs): r""" Sum of weighted Soft-Max reduction. Redirects to :meth:`sumsoftmaxweight` method. """ return self.sumsoftmaxweight(**kwargs) def min(self, axis=-1, dim=None, **kwargs): r""" Minimum unary operation, or Min reduction. ``min(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the min reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the min reduction of **self** over the "j" indexes. - if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the min of the values of the vector **self**, Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), 1 (= reduction over :math:`j`) or 2 (i.e. -1, min along the dimension of the vector variable). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("Min", dimres=1) else: return self.reduction("Min", axis=axis, **kwargs) def min_reduction(self, axis=None, dim=None, **kwargs): r""" Min reduction. ``min_reduction(axis, dim, **kwargs)`` will return the min reduction of **self**. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. 
**kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("Min", axis=axis, dim=dim, **kwargs) def __min__(self, **kwargs): r""" Minimum unary operation, or Min reduction. Redirects to :meth:`min` method. """ return self.min(**kwargs) def argmin(self, axis=-1, dim=None, **kwargs): r""" ArgMin unary operation, or ArgMin reduction. ``argmin(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the argmin reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the argmin reduction of **self** over the "j" indexes. - if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the argmin of the values of the vector **self**, Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), 1 (= reduction over :math:`j`) or 2 (i.e. -1, argmin along the dimension of the vector variable). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("ArgMin", dimres=1) else: return self.reduction("ArgMin", axis=axis, **kwargs) def argmin_reduction(self, axis=None, dim=None, **kwargs): r""" ArgMin reduction. ``argmin_reduction(axis, dim, **kwargs)`` will return the argmin reduction of **self**. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("ArgMin", axis=axis, dim=dim, **kwargs) def min_argmin(self, axis=None, dim=None, **kwargs): r""" Min-ArgMin reduction. ``min_argmin(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the minimal values and its indices of **self** over the "i" indexes. - if **axis or dim = 1**, return the minimal values and its indices of **self** over the "j" indexes. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("Min_ArgMin", axis=axis, dim=dim, **kwargs) def min_argmin_reduction(self, **kwargs): r""" Min-ArgMin reduction. Redirects to :meth:`min_argmin` method. """ return self.min_argmin(**kwargs) def max(self, axis=-1, dim=None, **kwargs): r""" Miaximum unary operation, or Max reduction. ``max(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the max reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the max reduction of **self** over the "j" indexes. - if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the max of the values of the vector **self**, Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), 1 (= reduction over :math:`j`) or 2 (i.e. -1, max along the dimension of the vector variable). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. 
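        Example (illustrative sketch, assuming the ``pykeops.torch.LazyTensor`` frontend and
        arbitrary shapes):

            >>> x_i = LazyTensor(torch.randn(1000, 1, 3))
            >>> y_j = LazyTensor(torch.randn(1, 2000, 3))
            >>> K_ij = ((x_i - y_j) ** 2).sum(-1)   # Symbolic (1000, 2000) squared distances
            >>> m_i = K_ij.max(dim=1)               # Max reduction over the "j" indexes -> (1000, 1) tensor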
""" if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("Max", dimres=1) else: return self.reduction("Max", axis=axis, **kwargs) def max_reduction(self, axis=None, dim=None, **kwargs): r""" Max reduction. ``max_reduction(axis, dim, **kwargs)`` will return the max reduction of **self**. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("Max", axis=axis, dim=dim, **kwargs) def __max__(self, **kwargs): r""" Maximum unary operation, or Max reduction. Redirects to :meth:`max` method. """ return self.max(**kwargs) def argmax(self, axis=-1, dim=None, **kwargs): r""" ArgMax unary operation, or ArgMax reduction. ``argmax(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the argmax reduction of **self** over the "i" indexes. - if **axis or dim = 1**, return the argmax reduction of **self** over the "j" indexes. - if **axis or dim = 2**, return a new :class:`LazyTensor` object representing the argmax of the values of the vector **self**, Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), 1 (= reduction over :math:`j`) or 2 (i.e. -1, argmax along the dimension of the vector variable). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("ArgMax", dimres=1) else: return self.reduction("ArgMax", axis=axis, **kwargs) def argmax_reduction(self, axis=None, dim=None, **kwargs): r""" ArgMax reduction. ``argmax_reduction(axis, dim, **kwargs)`` will return the argmax reduction of **self**. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("ArgMax", axis=axis, dim=dim, **kwargs) def max_argmax(self, axis=None, dim=None, **kwargs): r""" Max-ArgMax reduction. ``max_argmax(axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the maximal values and its indices of **self** over the "i" indexes. - if **axis or dim = 1**, return the maximal values and its indices of **self** over the "j" indexes. Keyword Args: axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("Max_ArgMax", axis=axis, dim=dim, **kwargs) def max_argmax_reduction(self, **kwargs): r""" Max-ArgMax reduction. Redirects to :meth:`max_argmax` method. """ return self.max_argmax(**kwargs) def Kmin(self, K, axis=None, dim=None, **kwargs): r""" K-Min reduction. ``Kmin(K, axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the K minimal values of **self** over the "i" indexes. - if **axis or dim = 1**, return the K minimal values of **self** over the "j" indexes. 
Keyword Args: K (integer): number of minimal values required axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("KMin", opt_arg=K, axis=axis, dim=dim, **kwargs) def Kmin_reduction(self, **kwargs): r""" Kmin reduction. Redirects to :meth:`Kmin` method. """ return self.Kmin(**kwargs) def argKmin(self, K, axis=None, dim=None, **kwargs): r""" argKmin reduction. ``argKmin(K, axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the indices of the K minimal values of **self** over the "i" indexes. - if **axis or dim = 1**, return the indices of the K minimal values of **self** over the "j" indexes. Keyword Args: K (integer): number of minimal values required axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("ArgKMin", opt_arg=K, axis=axis, dim=dim, **kwargs) def argKmin_reduction(self, **kwargs): r""" argKmin reduction. Redirects to :meth:`argKmin` method. """ return self.argKmin(**kwargs) def Kmin_argKmin(self, K, axis=None, dim=None, **kwargs): r""" K-Min-argK-min reduction. ``Kmin_argKmin(K, axis, dim, **kwargs)`` will: - if **axis or dim = 0**, return the K minimal values and its indices of **self** over the "i" indexes. - if **axis or dim = 1**, return the K minimal values and its indices of **self** over the "j" indexes. Keyword Args: K (integer): number of minimal values required axis (integer): reduction dimension, which should be equal to the number of batch dimensions plus 0 (= reduction over :math:`i`), or 1 (= reduction over :math:`j`). dim (integer): alternative keyword for the axis parameter. **kwargs: optional parameters that are passed to the :meth:`reduction` method. """ return self.reduction("KMin_ArgKMin", opt_arg=K, axis=axis, dim=dim, **kwargs) def Kmin_argKmin_reduction(self, **kwargs): r""" Kmin_argKmin reduction. Redirects to :meth:`Kmin_argKmin` method. """ return self.Kmin_argKmin(**kwargs) # LazyTensors as linear operators ========================================= def __matmul__(self, v, **kwargs): r""" Matrix-vector or Matrix-matrix product, supporting batch dimensions. If ``K`` is a :class:`LazyTensor` whose trailing dimension ``K._shape[-1]`` is equal to 1, we can understand it as a linear operator and apply it to arbitrary NumPy arrays or PyTorch Tensors. Assuming that ``v`` is a 1D (resp. ND) tensor such that ``K.shape[-1] == v.shape[-1]`` (resp. ``v.shape[-2]``), ``K @ v`` denotes the matrix-vector (resp. matrix-matrix) product between the two objects, encoded as a vanilla NumPy or PyTorch 1D (resp. ND) tensor. Example: >>> x, y = torch.randn(1000, 3), torch.randn(2000, 3) >>> x_i, y_j = LazyTensor( x[:,None,:] ), LazyTensor( y[None,:,:] ) >>> K = (- ((x_i - y_j)**2).sum(2) ).exp() # Symbolic (1000,2000,1) Gaussian kernel matrix >>> v = torch.rand(2000, 2) >>> print( (K @ v).shape ) ... torch.Size([1000, 2]) """ if self._shape[-1] != 1: raise ValueError( "The 'K @ v' syntax is only supported for LazyTensors " + "'K' whose trailing dimension is equal to 1. 
Here, K.shape = {}.".format( self.shape ) ) if len(v.shape) == 1: newdims = (1, v.shape[0], 1) else: newdims = v.shape[:-2] + (1,) + v.shape[-2:] v_ = self.lt_constructor(self.tools.view(v, newdims)) Kv = self * v_ # Supports broadcasting Kv = Kv.sum(Kv.dim() - 2, **kwargs) # Matrix-vector or Matrix-matrix product # Expected behavior: if v is a vector, so should K @ v. return self.tools.view(Kv, -1) if len(v.shape) == 1 else Kv def t(self): r""" Matrix transposition, permuting the axes of :math:`i`- and :math:`j`-variables. For instance, if ``K`` is a LazyTensor of shape ``(B,M,N,D)``, ``K.t()`` returns a symbolic copy of ``K`` whose axes 1 and 2 have been switched with each other: ``K.t().shape == (B,N,M,D)``. Example: >>> x, y = torch.randn(1000, 3), torch.randn(2000, 3) >>> x_i, y_j = LazyTensor( x[:,None,:] ), LazyTensor( y[None,:,:] ) >>> K = (- (( x_i - y_j )**2).sum(2) ).exp() # Symbolic (1000,2000) Gaussian kernel matrix >>> K_ = (- ((x[:,None,:] - y[None,:,:])**2).sum(2) ).exp() # Explicit (1000,2000) Gaussian kernel matrix >>> w = torch.rand(1000, 2) >>> print( (K.t() @ w - K_.t() @ w).abs().mean() ) ... tensor(1.7185e-05) """ res = copy.copy(self) res.ni, res.nj = res.nj, res.ni # Switch the "M" and "N" dimensions res.ranges = res.tools.swap_axes(res.ranges) if res.axis == 0: res.axis = 1 elif res.axis == 1: res.axis = 0 if res.formula is not None: # Switch variables with CAT=0 and CAT=1 res.formula = re.sub( r"(Var|VarSymb)\((\d+),(\d+),0\)", r"\1(\2,\3,i)", res.formula ) res.formula = re.sub( r"(Var|VarSymb)\((\d+),(\d+),1\)", r"\1(\2,\3,0)", res.formula ) res.formula = re.sub( r"(Var|VarSymb)\((\d+),(\d+),i\)", r"\1(\2,\3,1)", res.formula ) if res.formula2 is not None: # Switch variables with CAT=0 and CAT=1 res.formula2 = re.sub( r"(Var|VarSymb)\((\d+),(\d+),0\)", r"\1(\2,\3,i)", res.formula2 ) res.formula2 = re.sub( r"(Var|VarSymb)\((\d+),(\d+),1\)", r"\1(\2,\3,0)", res.formula2 ) res.formula2 = re.sub( r"(Var|VarSymb)\((\d+),(\d+),i\)", r"\1(\2,\3,1)", res.formula2 ) # we need also to make copies of references for all variables in the formula # that were switched newvars = [] for x in self.variables: if type(x) == list: # here we are dealing with a parameter variable, so no need to do any copy newvars.append(x) else: y = self.tools.view(x, x.shape) newvars.append(y) # now we replace all occurrences of old ids by new ids in formulas if res.formula is not None: res.formula = re.sub( r"(Var|VarSymb)\({},(\d+),(\d+)\)".format(id(x)), r"\1({},\2,\3)".format(id(y)), res.formula, ) if res.formula2 is not None: res.formula2 = re.sub( r"(Var|VarSymb)\({},(\d+),(\d+)\)".format(id(x)), r"\1({},\2,\3)".format(id(y)), res.formula2, ) res.variables = tuple(newvars) return res @property def T(self): r""" Numpy-friendly alias for the matrix transpose ``self.t()``. """ return self.t() def matvec(self, v): r""" Alias for the matrix-vector product, added for compatibility with :mod:`scipy.sparse.linalg`. If ``K`` is a :class:`LazyTensor` whose trailing dimension ``K._shape[-1]`` is equal to 1, we can understand it as a linear operator and wrap it into a :mod:`scipy.sparse.linalg.LinearOperator` object, thus getting access to robust solvers and spectral routines. 
Example: >>> import numpy as np >>> x = np.random.randn(1000,3) >>> x_i, x_j = LazyTensor( x[:,None,:] ), LazyTensor( x[None,:,:] ) >>> K_xx = (- ((x_i - x_j)**2).sum(2) ).exp() # Symbolic (1000,1000) Gaussian kernel matrix >>> from scipy.sparse.linalg import eigsh, aslinearoperator >>> eigenvalues, eigenvectors = eigsh( aslinearoperator( K_xx ), k=5 ) >>> print(eigenvalues) ... [ 35.5074527 59.01096445 61.35075268 69.34038814 123.77540277] >>> print( eigenvectors.shape) ... (1000, 5) """ return self @ v def rmatvec(self, v): r""" Alias for the transposed matrix-vector product, added for compatibility with :mod:`scipy.sparse.linalg`. See :meth:`matvec` for further reference. """ return self.T @ v def real2complex(self): r""" Element-wise "real 2 complex" operation - a unary operation. ``x.real2complex()`` returns a :class:`ComplexLazyTensor` that encodes, symbolically, the same tensor as ``x``, but seen as complex-valued (with zero imaginary part for each coefficient) """ return self.unary("Real2Complex", dimres=2 * self._shape[-1], is_complex=True) def imag2complex(self): r""" Element-wise "imag 2 complex" operation - a unary operation. ``x.real2complex()`` returns a :class:`ComplexLazyTensor` that encodes, symbolically, the multiplication of ``1j`` with ``x``. """ return self.unary("Imag2Complex", dimres=2 * self._shape[-1], is_complex=True) def exp1j(self): r""" Element-wise "complex exponential of 1j x" operation - a unary operation. ``x.exp1j()`` returns a :class:`ComplexLazyTensor` that encodes, symbolically, the complex exponential of ``1j*x``. """ return self.unary("ComplexExp1j", dimres=2 * self._shape[-1], is_complex=True) class ComplexGenericLazyTensor(GenericLazyTensor): r"""Extension of the LazyTensor class for complex operations.""" def __init__(self, x=None, axis=None): r"""Creates a KeOps symbolic variable of complex dtype.""" self.get_tools() if type(x) == complex: x = [x] if type(x) == list: x_ = [None] * (2 * len(x)) for i in range(len(x)): x_[2 * i] = x[i].real x_[2 * i + 1] = x[i].imag x = x_ elif self.tools.is_tensor(x): x = self.tools.view_as_real(x) super().__init__(x=x, axis=axis) self.is_complex = True def __call__(self, *args, **kwargs): res = super().__call__(*args, **kwargs) return self.tools.view_as_complex(res) @property def dtype(self): if self._dtype == "float32": return "complex64" elif self._dtype == "float64": return "complex128" @property def shape(self): r"""returns the shape of the complex LazyTensor.""" s = super()._shape s = s[:-1] + (s[-1] // 2,) if s[-1] == 1: return s[:-1] else: return s # List of supported operations ============================================ @property def real(self): r""" Element-wise real part of complex - a unary operation. ``z.real`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise real part of ``z``. """ return self.unary("ComplexReal", dimres=self._shape[-1] // 2, is_complex=False) @property def imag(self): r""" Element-wise imaginary part of complex - a unary operation. ``z.imag`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise imaginary part of ``z``. """ return self.unary("ComplexImag", dimres=self._shape[-1] // 2, is_complex=False) def angle(self): r""" Element-wise angle (or argument) of complex - a unary operation. ``z.angle()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise angle of ``z``. 
""" return self.unary("ComplexAngle", dimres=self._shape[-1] // 2, is_complex=False) def conj(self): r""" Element-wise complex conjugate - a unary operation. ``z.conj()`` returns a :class:`ComplexLazyTensor` that encodes, symbolically, the element-wise complex conjugate of ``z``. """ return self.unary("Conj", dimres=self._shape[-1], is_complex=True) def sum(self, axis=-1, dim=None, **kwargs): if dim is not None: axis = dim if axis in [-1, len(self._shape) - 1]: return self.unary("ComplexSum", dimres=2, is_complex=True) else: return self.reduction("Sum", axis=axis, **kwargs) def __abs__(self): r""" Element-wise absolute value (or modulus) of complex - a unary operation. ``z.abs()`` returns a :class:`LazyTensor` that encodes, symbolically, the element-wise absolute value of ``z``. """ return self.unary("ComplexAbs", dimres=self._shape[-1] // 2, is_complex=False) def exp(self): r""" Element-wise complex exponential - a unary operation. ``z.exp()`` returns a :class:`ComplexLazyTensor` that encodes, symbolically, the element-wise complex exponential of ``z``. """ return self.unary("ComplexExp", dimres=self._shape[-1], is_complex=True) def mulop(self, other, **kwargs): if other._shape[-1] == 1: return other.binary(self, "ComplexRealScal", **kwargs, is_complex=True) elif not is_complex_lazytensor(other): return self.mulop(other.real2complex()) else: return self.binary( other, "ComplexMult", **kwargs, is_complex=True, dimcheck=None ) def addop(self, other, **kwargs): if not is_complex_lazytensor(other): return self.addop(other.real2complex()) else: return self.binary( other, "ComplexAdd", **kwargs, is_complex=True, dimcheck=None ) def subop(self, other, **kwargs): if not is_complex_lazytensor(other): return self.subop(other.real2complex()) else: return self.binary( other, "ComplexSubtract", **kwargs, is_complex=True, dimcheck=None ) def divop(self, other, **kwargs): if not is_complex_lazytensor(other): return self.divop(other.real2complex()) else: return self.binary( other, "ComplexDivide", **kwargs, is_complex=True, dimcheck=None ) def real2complex(self): raise ValueError("real2complex cannot be applied to a complex LazyTensor.") def imag2complex(self): raise ValueError("imag2complex cannot be applied to a complex LazyTensor.") def exp1j(self): raise ValueError("exp1j cannot be applied to a complex LazyTensor.") def __call__(self, *args, **kwargs): res = super().__call__(*args, **kwargs) return self.tools.view_as_complex(res)
keops-main
pykeops/pykeops/common/lazy_tensor.py
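The reductions and the ``K @ v`` operator defined in the file above can be exercised with a few lines of user code; the snippet below is a minimal sketch (not part of the repository), assuming pykeops and its torch bindings are installed, with arbitrary toy sizes.

# Minimal usage sketch for the K-min / arg-K-min reductions and the linear-operator
# syntax documented in lazy_tensor.py (illustrative only, assuming pykeops is installed).
import torch
from pykeops.torch import LazyTensor

x = torch.randn(1000, 3)
y = torch.randn(2000, 3)

x_i = LazyTensor(x[:, None, :])   # (1000, 1, 3) symbolic "i" variable
y_j = LazyTensor(y[None, :, :])   # (1, 2000, 3) symbolic "j" variable

D_ij = ((x_i - y_j) ** 2).sum(-1)      # symbolic (1000, 2000) squared distances

knn_dists = D_ij.Kmin(K=5, axis=1)     # (1000, 5): 5 smallest distances per row
knn_inds = D_ij.argKmin(K=5, axis=1)   # (1000, 5): indices of those neighbours in y

K_ij = (-D_ij).exp()                   # symbolic Gaussian kernel, trailing dim 1
v = torch.rand(2000, 2)
print((K_ij @ v).shape)                # torch.Size([1000, 2]), dense result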
import re from collections import OrderedDict from pykeops.common.utils import pyKeOps_Message categories = OrderedDict([("Vi", 0), ("Vj", 1), ("Pm", 2)]) def complete_aliases(formula, aliases): """ This function parse formula (a string) to find pattern like 'Var(x,x,x)'. It then append to aliases (list of strings), the extra 'Var(x,x,x)'. """ # first we detect all instances of Var(*,*,*) in formula. # These may be extra variables that are not listed in the aliases extravars = re.findall(r"Var\([0-9]+,[0-9]+,[0-9]+\)", formula.replace(" ", "")) # we get unicity extravars = list(set(extravars)) # now we loop through extravars newind = () # this will give the indices in extravars list of new variables newpos = () # this will give the indices in aliases list of new variables for (ind, var) in enumerate(extravars): # we get the "position" of the variable as the first integer value in the string # (i.e. the "a" in "Var(a,b,c)") pos = int(re.search(r"[0-9]+", var).group(0)) if pos < len(aliases): # this means that in fact var is not an extra variable, it is already in the list of aliases # We could check that the "dimension" and "category" are consistent, but we assume here # that the formula is consistent. The check will be done in the C++ code. pass else: # we need to append var to aliases, but with correct position, so we first record the indices newind += (ind,) newpos += (pos,) # finally we append the new variables with correct ordering to the aliases list. We assume here again # that formula is consistent, more precisely # that pos is a permutation of len(aliases):len(aliases)+len(newind) aliases += [None] * len(newind) for i in range(len(newind)): aliases[newpos[i]] = extravars[newind[i]] return aliases def get_sizes(aliases, *args): nx, ny = None, None for (var_ind, sig) in enumerate(aliases): _, cat, dim, pos = get_type(sig, position_in_list=var_ind) if cat == 0: nx = args[pos].shape[-2] elif cat == 1: ny = args[pos].shape[-2] if (nx is not None) and (ny is not None): return nx, ny # At this point, we know that our formula is degenerate, # with no "x" or no "y" variable. The sensible behavior is to assume that # the corresponding "empty" dimension is equal to 1, # in accordance with the dimcheck of keops_io.h: TODO: check the new Size ! if nx is None: nx = 1 if ny is None: ny = 1 return nx, ny def get_type(type_str, position_in_list=None): """ Get the type of the variable declared in type_str. :param type_str: is a string of the form "var = Xy(dim)" or "var = Xy(pos,dim)" or "Xy(pos,dim)" or "Xy(dim)" with Xy being either Vi, Vj, Vx, Vy, or Pm, or "var = Var(pos,dim,cat)" (N.B. Vx and Vy are equivalent to resp. 
Vi and Vj and kept for backward compatibility) :param position_in_list: an optional integer used if the position is not given in type_str (ie is of the form "var = Xy(dim)" or "Xy(dim)") :return: name : a string (here "var"), cat : an int (0,1 or 2), dim : an int """ # switch old Vx Vy syntax to Vi Vj if ("Vx" in type_str) or ("Vy" in type_str): type_str = type_str.replace("Vx", "Vi") type_str = type_str.replace("Vy", "Vj") import warnings warnings.warn("'Vx' and 'Vy' variables types are now renamed 'Vi' and 'Vj'") m = re.match( r"([a-zA-Z_][a-zA-Z_0-9]*)=(Vi|Vj|Pm)\(([0-9]*?),?([0-9]*)\)", type_str.replace(" ", ""), ) if m is None: m = re.match(r"(Vi|Vj|Pm)\(([0-9]*?),?([0-9]*)\)", type_str.replace(" ", "")) if m is None: m = re.match( r"Var\(([0-9]*?),?([0-9]*),?([0-9]*)\)", type_str.replace(" ", "") ) if m is None: raise ValueError( type_str + " type_str does not match the 'var = [Vi|Vj|Pm](dim)' or 'var = [Vi|Vj|Pm](pos,dim) or '[Vi|Vj|Pm](dim) or '[Vi|Vj|Pm](pos,dim) or Var(pos,dim,cat)' format: " + type_str ) else: # output: varname, cat , dim , pos return None, int(m.group(3)), int(m.group(2)), int(m.group(1)) else: # Try to infer position if m.group(2): pos = int(m.group(2)) elif position_in_list is not None: pos = int(position_in_list) else: pos = None # output: varname, cat , dim , pos return None, categories[m.group(1)], int(m.group(3)), pos else: # Try to infer position if m.group(3): pos = int(m.group(3)) elif position_in_list is not None: pos = int(position_in_list) else: pos = None # output: varname, cat , dim , pos return m.group(1), categories[m.group(2)], int(m.group(4)), pos def get_optional_flags( reduction_op_internal, dtype_acc, use_double_acc, sum_scheme, enable_chunks ): # 1. Options for accuracy if dtype_acc != "auto" and use_double_acc: raise ValueError( pyKeOps_Message("you cannot set both options use_double_acc and dtype_acc.") ) if use_double_acc: dtype_acc = "float64" if dtype_acc != "auto" and reduction_op_internal not in ( "Sum", "Max_SumShiftExp", "Max_SumShiftExpWeight", ): raise ValueError( pyKeOps_Message( "parameter dtype_acc should be set to 'auto' for no-sum type reductions (Min, Max, ArgMin, etc.)" ) ) if sum_scheme == "auto": if reduction_op_internal in ("Sum", "Max_SumShiftExp", "Max_SumShiftExpWeight"): sum_scheme = "block_sum" else: sum_scheme = "direct_sum" if sum_scheme == "block_sum": if reduction_op_internal not in ( "Sum", "Max_SumShiftExp", "Max_SumShiftExpWeight", ): raise ValueError( pyKeOps_Message( 'sum_scheme="block_sum" is only valid for sum type reductions.' ) ) elif sum_scheme == "kahan_scheme": if reduction_op_internal not in ( "Sum", "Max_SumShiftExp", "Max_SumShiftExpWeight", ): raise ValueError( pyKeOps_Message( 'sum_scheme="kahan_scheme" is only valid for sum type reductions.' ) ) elif sum_scheme != "direct_sum": raise ValueError( pyKeOps_Message( 'invalid value for option sum_scheme : should be one of "auto", "direct_sum", "block_sum" or "kahan_scheme".' ) ) optional_flags = dict() optional_flags["dtype_acc"] = dtype_acc optional_flags["sum_scheme"] = sum_scheme # 2. 
Option for chunk mode if enable_chunks: optional_flags["enable_chunks"] = 1 else: optional_flags["enable_chunks"] = 0 return optional_flags def parse_dtype_acc(dtype_acc, dtype): if dtype_acc == "auto": dtype_acc = dtype if dtype == "float32" and dtype_acc not in ("float32", "float64"): raise ValueError( "[KeOps] invalid parameter dtype_acc : should be either 'float32' or 'float64' when dtype is 'float32'" ) elif dtype == "float16" and dtype_acc not in ("float16", "float32"): raise ValueError( "[KeOps] invalid parameter dtype_acc : should be either 'float16' or 'float32' when dtype is 'float16'" ) elif dtype == "float64" and dtype_acc not in "float64": raise ValueError( "[KeOps] invalid parameter dtype_acc : should be 'float64' when dtype is 'float64'" ) if dtype_acc == "float64": dtype_acc = "double" elif dtype_acc == "float32": if dtype == "float16": dtype_acc = "float2" else: dtype_acc = "float" elif dtype_acc == "float16": dtype_acc = "half2" else: raise ValueError( '[KeOps] invalid value for option dtype_acc : should be one of "auto", "float16", "float32" or "float64".' ) return dtype_acc
keops-main
pykeops/pykeops/common/parse_type.py
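A short sketch of the alias formats accepted by the parser above; the alias strings are arbitrary examples and the snippet assumes pykeops is importable.

# Illustration of the alias syntaxes handled by get_type and complete_aliases
# (a sketch, not part of the repository; the strings are arbitrary examples).
from pykeops.common.parse_type import get_type, complete_aliases

print(get_type("x = Vi(0,3)"))                     # ('x', 0, 3, 0): name, cat (Vi=0), dim, pos
print(get_type("y = Vj(3)", position_in_list=1))   # ('y', 1, 3, 1): pos inferred from list index
print(get_type("Var(2,1,2)"))                      # (None, 2, 1, 2): raw Var(pos,dim,cat) syntax

# complete_aliases appends the Var(...) occurrences of a formula that are
# missing from the alias list:
aliases = ["x = Vi(0,3)", "y = Vj(1,3)"]
print(complete_aliases("Exp(-SqDist(x,y)) * Var(2,1,1)", aliases))
# -> ['x = Vi(0,3)', 'y = Vj(1,3)', 'Var(2,1,1)']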
keops-main
pykeops/pykeops/common/__init__.py
import numpy as np from pykeops.common.utils import get_tools # Some advance operations defined at user level use in fact other reductions. def preprocess(reduction_op, formula2): reduction_op = reduction_op if ( reduction_op == "SumSoftMaxWeight" or reduction_op == "SoftMax" ): # SoftMax is just old naming for SumSoftMaxWeight # SumSoftMaxWeight relies on KeOps Max_SumShiftExpWeight reduction, with a custom finalize reduction_op_internal = "Max_SumShiftExpWeight" # we concatenate the 2nd formula (g) with a constant 1, so that we get sum_j exp(m_i-f_ij) g_ij and sum_j exp(m_i-f_ij) together formula2 = "Concat(IntCst(1)," + formula2 + ")" elif reduction_op == "LogSumExp": # LogSumExp relies also on Max_SumShiftExp or Max_SumShiftExpWeight reductions, with a custom finalize if formula2: # here we want to compute a log-sum-exp with weights: log(sum_j(exp(f_ij)g_ij)) reduction_op_internal = "Max_SumShiftExpWeight" else: # here we want to compute a usual log-sum-exp: log(sum_j(exp(f_ij))) reduction_op_internal = "Max_SumShiftExp" else: reduction_op_internal = reduction_op return reduction_op_internal, formula2 def postprocess(out, binding, reduction_op, nout, opt_arg, dtype): tools = get_tools(binding) # Post-processing of the output: if reduction_op == "SumSoftMaxWeight" or reduction_op == "SoftMax": # we compute sum_j exp(f_ij) g_ij / sum_j exp(f_ij) from sum_j exp(m_i-f_ij) [1,g_ij] out = out[..., 2:] / out[..., 1][..., None] elif reduction_op == "ArgMin" or reduction_op == "ArgMax": # outputs are encoded as floats but correspond to indices, so we cast to integers out = tools.long(out) elif ( reduction_op == "Min_ArgMin" or reduction_op == "MinArgMin" or reduction_op == "Max_ArgMax" or reduction_op == "MaxArgMax" ): # output is one array of size N x 2D, giving min and argmin value for each dimension. # We convert to one array of floats of size NxD giving mins, and one array of size NxD giving argmins (casted to integers) shape_out = out.shape tmp = tools.view(out, shape_out[:-1] + (2, -1)) vals = tmp[..., 0, :] indices = tmp[..., 1, :] out = (vals, tools.long(indices)) elif reduction_op == "KMin": # output is of size N x KD giving K minimal values for each dim. We convert to array of size N x K x D shape_out = out.shape out = tools.view(out, shape_out[:-1] + (opt_arg, -1)) if out.shape[-1] == 1: out = out.squeeze(-1) elif reduction_op == "ArgKMin": # output is of size N x KD giving K minimal values for each dim. We convert to array of size N x K x D # and cast to integers shape_out = out.shape out = tools.view(tools.long(out), shape_out[:-1] + (opt_arg, -1)) if out.shape[-1] == 1: out = out.squeeze(-1) elif reduction_op == "KMin_ArgKMin" or reduction_op == "KMinArgKMin": # output is of size N x 2KD giving K min and argmin for each dim. 
We convert to 2 arrays of size N x K x D # and cast to integers the second array shape_out = out.shape out = tools.view(out, shape_out[:-1] + (opt_arg, 2, -1)) out = (out[..., 0, :], tools.long(out[..., 1, :])) if out[0].shape[-1] == 1: out = (out[0].squeeze(-1), out[1].squeeze(-1)) elif reduction_op == "LogSumExp": # finalize the log-sum-exp computation as m + log(s) if out.shape[-1] == 2: # means (m,s) with m scalar and s scalar out = (out[..., 0] + tools.log(out[..., 1]))[..., None] else: # here out.shape[-1]>2, means (m,s) with m scalar and s vectorial out = out[..., 0][..., None] + tools.log(out[..., 1:]) return out def ConjugateGradientSolver(binding, linop, b, eps=1e-6): # Conjugate gradient algorithm to solve linear system of the form # Ma=b where linop is a linear operation corresponding # to a symmetric and positive definite matrix tools = get_tools(binding) delta = tools.size(b) * eps**2 a = 0 r = tools.copy(b) nr2 = (r**2).sum() if nr2 < delta: return 0 * r p = tools.copy(r) k = 0 while True: Mp = linop(p) alp = nr2 / (p * Mp).sum() a += alp * p r -= alp * Mp nr2new = (r**2).sum() if nr2new < delta: break p = r + (nr2new / nr2) * p nr2 = nr2new k += 1 return a def KernelLinearSolver( binding, K, x, b, alpha=0, eps=1e-6, precond=False, precondKernel=None ): tools = get_tools(binding) dtype = tools.dtype(x) def PreconditionedConjugateGradientSolver(linop, b, invprecondop, eps=1e-6): # Preconditioned conjugate gradient algorithm to solve linear system of the form # Ma=b where linop is a linear operation corresponding # to a symmetric and positive definite matrix # invprecondop is linear operation corresponding to the inverse of the preconditioner matrix a = 0 r = tools.copy(b) z = invprecondop(r) p = tools.copy(z) rz = (r * z).sum() k = 0 while True: alp = rz / (p * linop(p)).sum() a += alp * p r -= alp * linop(p) if (r**2).sum() < eps**2: break z = invprecondop(r) rznew = (r * z).sum() p = z + (rznew / rz) * p rz = rznew k += 1 return a def NystromInversePreconditioner(K, Kspec, x, alpha): N, D = x.shape m = int(np.sqrt(N)) ind = np.random.choice(range(N), m, replace=False) u = x[ind, :] M = K(u, u) + Kspec( tools.tile(u, (m, 1)), tools.tile(u, (1, m)).reshape(-1, D), x ).reshape(m, m) def invprecondop(r): a = tools.solve(M, K(u, x, r)) return (r - K(x, u, a)) / alpha return invprecondop def KernelLinOp(a): return K(x, x, a) + alpha * a def GaussKernel(D, Dv, sigma): formula = "Exp(-oos2*SqDist(x,y))*b" variables = [ "x = Vi(" + str(D) + ")", # First arg : i-variable, of size D "y = Vj(" + str(D) + ")", # Second arg : j-variable, of size D "b = Vj(" + str(Dv) + ")", # Third arg : j-variable, of size Dv "oos2 = Pm(1)", ] # Fourth arg : scalar parameter my_routine = tools.Genred( formula, variables, reduction_op="Sum", axis=1, dtype=dtype ) oos2 = tools.array([1.0 / sigma**2], dtype=dtype) KernelMatrix = GaussKernelMatrix(sigma) def K(x, y, b=None): if b is None: return KernelMatrix(x, y) else: return my_routine(x, y, b, oos2) return K def GaussKernelNystromPrecond(D, sigma): formula = "Exp(-oos2*(SqDist(u,x)+SqDist(v,x)))" variables = [ "u = Vi(" + str(D) + ")", # First arg : i-variable, of size D "v = Vi(" + str(D) + ")", # Second arg : i-variable, of size D "x = Vj(" + str(D) + ")", # Third arg : j-variable, of size D "oos2 = Pm(1)", ] # Fourth arg : scalar parameter my_routine = tools.Genred( formula, variables, reduction_op="Sum", axis=1, dtype=tools.dtypename(dtype) ) oos2 = tools.array([1.0 / sigma**2], dtype=dtype) KernelMatrix = GaussKernelMatrix(sigma) def K(u, v, x): 
return my_routine(u, v, x, oos2) return K def GaussKernelMatrix(sigma): oos2 = 1.0 / sigma**2 def f(x, y): D = x.shape[1] sqdist = 0 for k in range(D): sqdist += (x[:, k][:, None] - tools.transpose(y[:, k][:, None])) ** 2 return tools.exp(-oos2 * sqdist) return f if type(K) == tuple: if K[0] == "gaussian": D = K[1] Dv = K[2] sigma = K[3] K = GaussKernel(D, Dv, sigma) if precond: precondKernel = GaussKernelNystromPrecond(D, sigma) if precond: invprecondop = NystromInversePreconditioner(K, precondKernel, x, alpha) a = PreconditionedConjugateGradientSolver(KernelLinOp, b, invprecondop, eps) else: a = ConjugateGradientSolver(binding, KernelLinOp, b, eps=eps) return a
keops-main
pykeops/pykeops/common/operations.py
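The conjugate-gradient iteration at the heart of ConjugateGradientSolver and KernelLinearSolver can be read more easily as a standalone NumPy re-implementation; the sketch below is an illustrative paraphrase, not the library routine, and the toy kernel system is an arbitrary example.

# Standalone NumPy sketch of the conjugate-gradient loop used above
# (illustrative re-implementation, not the library's ConjugateGradientSolver).
import numpy as np

def cg_sketch(linop, b, eps=1e-6):
    # Solves M a = b for a symmetric positive-definite operator `linop`.
    delta = b.size * eps ** 2
    a = np.zeros_like(b)
    r = b.copy()
    nr2 = (r ** 2).sum()
    if nr2 < delta:
        return a
    p = r.copy()
    while True:
        Mp = linop(p)
        alp = nr2 / (p * Mp).sum()
        a += alp * p
        r -= alp * Mp
        nr2new = (r ** 2).sum()
        if nr2new < delta:
            break
        p = r + (nr2new / nr2) * p
        nr2 = nr2new
    return a

# Toy check on a small dense SPD system (K + alpha * I) a = b:
rng = np.random.default_rng(0)
X = rng.standard_normal((50, 3))
K = np.exp(-((X[:, None, :] - X[None, :, :]) ** 2).sum(-1))
alpha = 0.5
b = rng.standard_normal((50, 1))
a = cg_sketch(lambda v: K @ v + alpha * v, b)
print(np.abs(K @ a + alpha * a - b).max())   # residual should be small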
import fcntl
import functools
import importlib.util
import os

import pykeops.config

c_type = dict(float16="half2", float32="float", float64="double")


def axis2cat(axis):
    """
    Axis is the dimension to sum (the pythonic way).
    Cat is the dimension that remains at the end (the KeOps way).
    :param axis: 0 or 1
    :return: cat: 1 or 0
    """
    if axis in [0, 1]:
        return (axis + 1) % 2
    else:
        raise ValueError("Axis should be 0 or 1.")


def cat2axis(cat):
    """
    Axis is the dimension to sum (the pythonic way).
    Cat is the dimension that remains at the end (the KeOps way).
    :param cat: 0 or 1
    :return: axis: 1 or 0
    """
    if cat in [0, 1]:
        return (cat + 1) % 2
    else:
        raise ValueError("Category should be Vi or Vj.")


def get_tools(lang):
    """
    get_tools is used to simulate templates, as in C++ code.
    Depending on the language, it imports the right classes.
    :param lang: a string with the language ('torch'/'pytorch' or 'numpy')
    :return: a tools class instance
    """
    if lang == "numpy":
        from pykeops.numpy.utils import numpytools

        tools = numpytools()
    elif lang == "torch" or lang == "pytorch":
        from pykeops.torch.utils import torchtools

        tools = torchtools()
    return tools


def WarmUpGpu(lang):
    tools = get_tools(lang)
    # dummy first calls for accurate timing in case of GPU use
    my_routine = tools.Genred(
        "SqDist(x,y)",
        ["x = Vi(1)", "y = Vj(1)"],
        reduction_op="Sum",
        axis=1,
        dtype=tools.dtype,
    )
    dum = tools.rand(10, 1)
    my_routine(dum, dum)
    my_routine(dum, dum)


def max_tuple(a, b):
    return tuple(max(a_i, b_i) for (a_i, b_i) in zip(a, b))


def check_broadcasting(dims_1, dims_2):
    r"""
    Checks that the shapes **dims_1** and **dims_2** are compatible with each other.
    """
    if dims_1 is None:
        return dims_2
    if dims_2 is None:
        return dims_1

    padded_dims_1 = (1,) * (len(dims_2) - len(dims_1)) + dims_1
    padded_dims_2 = (1,) * (len(dims_1) - len(dims_2)) + dims_2

    for (dim_1, dim_2) in zip(padded_dims_1, padded_dims_2):
        if dim_1 != 1 and dim_2 != 1 and dim_1 != dim_2:
            raise ValueError(
                "Incompatible batch dimensions: {} and {}.".format(dims_1, dims_2)
            )

    return max_tuple(padded_dims_1, padded_dims_2)


def pyKeOps_Message(message, use_tag=True, **kwargs):
    if pykeops.verbose:
        tag = "[pyKeOps] " if use_tag else ""
        message = tag + message
        print(message, **kwargs)


def pyKeOps_Warning(message):
    if pykeops.verbose:
        message = "[pyKeOps] Warning : " + message
        print(message)
keops-main
pykeops/pykeops/common/utils.py
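A couple of quick calls illustrating the helpers defined above (assuming pykeops is importable; the shapes are arbitrary examples).

# Axis/category conversion and batch-shape broadcasting, as implemented in utils.py.
from pykeops.common.utils import axis2cat, cat2axis, check_broadcasting

print(axis2cat(0), axis2cat(1))    # 1 0 : summing over axis 0 keeps the "j" (cat 1) variables
print(cat2axis(0), cat2axis(1))    # 1 0

# Batch shapes are padded on the left and broadcast like NumPy/PyTorch shapes:
print(check_broadcasting((2, 1, 5), (3, 5)))   # (2, 3, 5)
print(check_broadcasting(None, (4, 2)))        # (4, 2)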
import re import numpy as np from collections import OrderedDict import pykeops import pykeops.config ############################################################ # define backend ############################################################ class SetBackend: """ This class is used to centralized the options used in PyKeops. """ dev = OrderedDict([("CPU", 0), ("GPU", 1)]) grid = OrderedDict([("1D", 0), ("2D", 1)]) memtype = OrderedDict([("host", 0), ("device", 1)]) possible_options_list = [ "auto", "CPU", "GPU", "GPU_1D", "GPU_1D_device", "GPU_1D_host", "GPU_2D", "GPU_2D_device", "GPU_2D_host", ] def define_tag_backend(self, backend, variables): """ Try to make a good guess for the backend... available methods are: (host means Cpu, device means Gpu) CPU : computations performed with the host from host arrays GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme GPU_2D_host : computations performed on the device from host data, using the 2D scheme :param backend (str), variables (tuple) :return (tagCPUGPU, tag1D2D, tagHostDevice) """ # check that the option is valid if backend not in self.possible_options_list: raise ValueError( "Invalid backend. Should be one of ", self.possible_options_list ) # auto : infer everything if backend == "auto": return ( int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables), ) split_backend = re.split("_", backend) if len(split_backend) == 1: # CPU or GPU return ( self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables), ) elif len(split_backend) == 2: # GPU_1D or GPU_2D return ( self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables), ) elif len(split_backend) == 3: # the option is known return ( self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]], ) def define_backend(self, backend, variables): tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables) return self.dev[tagCPUGPU], self.grid[tag1D2D], self.memtype[tagHostDevice] @staticmethod def _find_dev(): return int(pykeops.config.gpu_available) @staticmethod def _find_mem(variables): if all( [type(var) is np.ndarray for var in variables] ): # Infer if we're working with numpy arrays or torch tensors: MemType = 0 elif pykeops.config.torch_found: import torch if all( [ type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables ] ): from pykeops.torch.utils import is_on_device VarsAreOnGpu = tuple(map(is_on_device, tuple(variables))) if all(VarsAreOnGpu): MemType = 1 elif not any(VarsAreOnGpu): MemType = 0 else: raise ValueError( "At least two input variables have different memory locations (Cpu/Gpu)." ) else: raise TypeError( "All variables should either be numpy arrays or torch tensors." ) else: raise TypeError( "All variables should either be numpy arrays or torch tensors." ) return MemType @staticmethod def _find_grid(): return 0 def get_tag_backend(backend, variables, str=False): """ entry point to get the correct backend """ res = SetBackend() if not str: return res.define_tag_backend(backend, variables) else: return res.define_backend(backend, variables)
keops-main
pykeops/pykeops/common/get_options.py
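A sketch of how a backend string is resolved into the (tagCPUGPU, tag1D2D, tagHostDevice) triple, assuming pykeops is importable; the exact "auto" output depends on whether a GPU is detected.

# Backend-tag resolution for NumPy inputs (illustrative only).
import numpy as np
from pykeops.common.get_options import get_tag_backend

x = np.random.randn(100, 3)
y = np.random.randn(200, 3)

tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend("auto", (x, y))
print(tagCPUGPU, tag1D2D, tagHostDevice)          # e.g. 0 0 0 on a CPU-only machine

# An explicit request bypasses the "auto" heuristics:
print(get_tag_backend("GPU_1D_device", (x, y)))   # (1, 0, 1)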
from keopscore.utils.gpu_utils import get_gpu_props


def get_gpu_number():
    return get_gpu_props()[0]
keops-main
pykeops/pykeops/common/gpu_utils.py
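For completeness, a one-line usage sketch of the helper above, assuming keopscore is installed; on a machine without CUDA the reported count is expected to be 0.

# Hypothetical usage (assumes keopscore is installed).
from pykeops.common.gpu_utils import get_gpu_number

print(get_gpu_number())   # number of GPUs visible to KeOps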
import os import keopscore.config.config from keopscore.config.config import get_build_folder import pykeops from keopscore.binders.nvrtc.Gpu_link_compile import Gpu_link_compile from keopscore.utils.Cache import Cache_partial from pykeops.common.keops_io.LoadKeOps import LoadKeOps from pykeops.common.utils import pyKeOps_Message from keopscore.utils.misc_utils import KeOps_OS_Run class LoadKeOps_nvrtc_class(LoadKeOps): def __init__(self, *args, fast_init=False): super().__init__(*args, fast_init=fast_init) def init_phase2(self): import importlib pykeops_nvrtc = importlib.import_module("pykeops_nvrtc") if self.params.c_dtype == "float": self.launch_keops = pykeops_nvrtc.KeOps_module_float( self.params.device_id_request, self.params.nargs, self.params.low_level_code_file, ) elif self.params.c_dtype == "double": self.launch_keops = pykeops_nvrtc.KeOps_module_double( self.params.device_id_request, self.params.nargs, self.params.low_level_code_file, ) elif self.params.c_dtype == "half2": self.launch_keops = pykeops_nvrtc.KeOps_module_half2( self.params.device_id_request, self.params.nargs, self.params.low_level_code_file, ) def call_keops(self, nx, ny): self.launch_keops( self.params.tagHostDevice, self.params.dimy, nx, ny, self.params.tagI, self.params.tagZero, self.params.use_half, self.params.tag1D2D, self.params.dimred, self.params.cuda_block_size, self.params.use_chunk_mode, self.params.indsi, self.params.indsj, self.params.indsp, self.params.dim, self.params.dimsx, self.params.dimsy, self.params.dimsp, self.ranges_ptr_new, self.outshape, self.out_ptr, self.args_ptr_new, self.argshapes_new, ) def import_module(self): return self def compile_jit_binary(): """ This function compile the main .so entry point to keops_nvrt binder... """ compile_command = Gpu_link_compile.get_compile_command( extra_flags=pykeops.config.python_includes, sourcename=pykeops.config.pykeops_nvrtc_name(type="src"), dllname=pykeops.config.pykeops_nvrtc_name(type="target"), ) pyKeOps_Message("Compiling nvrtc binder for python ... ", flush=True, end="") KeOps_OS_Run(compile_command) pyKeOps_Message("OK", use_tag=False, flush=True) LoadKeOps_nvrtc = Cache_partial( LoadKeOps_nvrtc_class, use_cache_file=True, save_folder=get_build_folder(), )
keops-main
pykeops/pykeops/common/keops_io/LoadKeOps_nvrtc.py
import types from functools import reduce import numpy as np from keopscore.get_keops_dll import get_keops_dll from pykeops.common.parse_type import parse_dtype_acc class LoadKeOps: null_range = np.array([-1], dtype="int32") empty_ranges_new = tuple([null_range.__array_interface__["data"][0]] * 7) def __init__(self, *args, fast_init): if fast_init: self.params = args[0] else: self.init(*args) self.dimout = self.params.dim self.tagIJ = self.params.tagI if self.params.lang == "torch": from pykeops.torch.utils import torchtools self.tools = torchtools elif self.params.lang == "numpy": from pykeops.numpy.utils import numpytools self.tools = numpytools self.init_phase2() def init( self, tagCPUGPU, tag1D2D, tagHostDevice, use_ranges, device_id_request, formula, aliases, nargs, dtype, lang, optional_flags, ): aliases_new = [] for k, alias in enumerate(aliases): alias = alias.replace(" ", "") if "=" in alias: varname, var = alias.split("=") if "Vi" in var: cat = 0 elif "Vj" in var: cat = 1 elif "Pm" in var: cat = 2 alias_args = var[3:-1].split(",") if len(alias_args) == 1: ind, dim = k, eval(alias_args[0]) elif len(alias_args) == 2: ind, dim = eval(alias_args[0]), eval(alias_args[1]) alias = f"{varname}=Var({ind},{dim},{cat})" aliases_new.append(alias) self.params = types.SimpleNamespace() self.params.aliases_old = aliases self.params.aliases = aliases_new self.params.lang = lang self.params.red_formula_string = formula self.params.dtype = dtype dtype_acc = optional_flags["dtype_acc"] dtype_acc = parse_dtype_acc(dtype_acc, dtype) self.params.c_dtype_acc = dtype_acc self.params.sum_scheme = optional_flags["sum_scheme"] self.params.enable_chunks = optional_flags["enable_chunks"] self.params.enable_final_chunks = -1 self.params.mult_var_highdim = optional_flags["multVar_highdim"] self.params.tagHostDevice = tagHostDevice if dtype == "float32": self.params.c_dtype = "float" self.params.use_half = False elif dtype == "float64": self.params.c_dtype = "double" self.params.use_half = False elif dtype == "float16": self.params.c_dtype = "half2" self.params.use_half = True else: raise ValueError("not implemented") if not self.params.c_dtype_acc: self.params.c_dtype_acc = self.params.c_dtype if tagCPUGPU == 0: map_reduce_id = "CpuReduc" else: map_reduce_id = "GpuReduc" map_reduce_id += "1D" if tag1D2D == 0 else "2D" if use_ranges: map_reduce_id += "_ranges" ( self.params.tag, self.params.source_name, self.params.low_level_code_file, self.params.tagI, self.params.tagZero, self.params.use_half, self.params.cuda_block_size, self.params.use_chunk_mode, self.params.tag1D2D, self.params.dimred, self.params.dim, self.params.dimy, indsi, indsj, indsp, dimsx, dimsy, dimsp, ) = get_keops_dll( map_reduce_id, self.params.red_formula_string, self.params.enable_chunks, self.params.enable_final_chunks, self.params.mult_var_highdim, self.params.aliases, nargs, self.params.c_dtype, self.params.c_dtype_acc, self.params.sum_scheme, self.params.tagHostDevice, tagCPUGPU, tag1D2D, self.params.use_half, device_id_request, ) # now we switch indsi, indsj and dimsx, dimsy in case tagI=1. # This is to be consistent with the convention used in the old # bindings (see functions GetIndsI, GetIndsJ, GetDimsX, GetDimsY # from file binder_interface.h. 
Clearly we could do better if we # carefully rewrite some parts of the code if self.params.tagI == 1: indsi, indsj = indsj, indsi dimsx, dimsy = dimsy, dimsx self.params.indsi = indsi self.params.indsj = indsj self.params.indsp = indsp self.params.dimsx = dimsx self.params.dimsy = dimsy self.params.dimsp = dimsp self.params.tagCPUGPU = tagCPUGPU self.params.device_id_request = device_id_request self.params.nargs = nargs self.params.reduction_op = self.params.red_formula_string.split("(")[0] self.params.axis = 1 - self.params.tagI self.init_phase1() def init_phase1(self): pass def init_phase2(self): pass def genred( self, device_args, ranges, nx, ny, nbatchdims, out, *args, ): if self.params.use_half: from pykeops.torch.half2_convert import preprocess_half2 args, ranges, tag_dummy, N = preprocess_half2( args, self.params.aliases_old, self.params.axis, ranges, nx, ny ) # get ranges argument if not ranges: self.ranges_ptr_new = self.empty_ranges_new else: ranges_shapes = self.tools.array( [r.shape[0] for r in ranges], dtype="int32", device="cpu" ) ranges = [*ranges, ranges_shapes] self.ranges_ptr_new = tuple([self.tools.get_pointer(r) for r in ranges]) self.args_ptr_new = tuple([self.tools.get_pointer(arg) for arg in args]) # get all shapes of arguments self.argshapes_new = tuple([arg.shape for arg in args]) # initialize output array M = nx if self.params.tagI == 0 else ny if self.params.use_half: M += M % 2 if nbatchdims: batchdims_shapes = [] for arg in args: batchdims_shapes.append(list(arg.shape[:nbatchdims])) tmp = reduce( np.maximum, batchdims_shapes ) # this is faster than np.max(..., axis=0) shapeout = tuple(tmp) + (M, self.params.dim) else: shapeout = (M, self.params.dim) if out is None: out = self.tools.empty(shapeout, dtype=args[0].dtype, device=device_args) self.out_ptr = self.tools.get_pointer(out) self.outshape = out.shape self.call_keops(nx, ny) if self.params.dtype == "float16": from pykeops.torch.half2_convert import postprocess_half2 out = postprocess_half2(out, tag_dummy, self.params.reduction_op, N) return out genred_pytorch = genred genred_numpy = genred def call_keops(self): pass def import_module(self): return self
keops-main
pykeops/pykeops/common/keops_io/LoadKeOps.py
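The alias-normalisation step performed at the start of LoadKeOps.init reads more easily as a standalone function; the sketch below is an illustrative paraphrase (using int instead of eval), not code imported from the repository.

# Standalone sketch of the alias normalisation done in LoadKeOps.init above:
# "name = Vi/Vj/Pm(dim)" or "(pos,dim)" aliases become "name=Var(ind,dim,cat)".
def normalise_aliases(aliases):
    aliases_new = []
    for k, alias in enumerate(aliases):
        alias = alias.replace(" ", "")
        if "=" in alias:
            varname, var = alias.split("=")
            cat = {"Vi": 0, "Vj": 1, "Pm": 2}[var[:2]]
            alias_args = var[3:-1].split(",")
            if len(alias_args) == 1:
                ind, dim = k, int(alias_args[0])
            else:
                ind, dim = int(alias_args[0]), int(alias_args[1])
            alias = f"{varname}=Var({ind},{dim},{cat})"
        aliases_new.append(alias)
    return aliases_new

print(normalise_aliases(["x = Vi(3)", "y = Vj(3)", "s = Pm(2,1)"]))
# -> ['x=Var(0,3,0)', 'y=Var(1,3,1)', 's=Var(2,1,2)']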
import keopscore.config

if keopscore.config.config.use_cuda:
    from . import LoadKeOps_nvrtc, LoadKeOps_cpp

    keops_binder = {
        "nvrtc": LoadKeOps_nvrtc.LoadKeOps_nvrtc,
        "cpp": LoadKeOps_cpp.LoadKeOps_cpp,
    }
else:
    from . import LoadKeOps_cpp

    keops_binder = {"cpp": LoadKeOps_cpp.LoadKeOps_cpp}
keops-main
pykeops/pykeops/common/keops_io/__init__.py