diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..4b2c82d575a513a6cdb4ca5a8a9de4273756dbb2
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,39 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoints/** filter=lfs diff=lfs merge=lfs -text
+checkpoints/c2i_model_256.safetensors filter=lfs diff=lfs merge=lfs -text
+checkpoints/c2i_model_512.safetensors filter=lfs diff=lfs merge=lfs -text
+checkpoints/t2i_model.bin filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..524ad45a475c98f24cddbc7c4da16aac1a2ae225
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,183 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+
+*.json
+*.svg
+/workdir
+/datasets
+/wandb
+
+samples/
\ No newline at end of file
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000000000000000000000000000000000000..c8cfe3959183f8e9a50f83f54cd723f2dc9c252d
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.10
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2de7982a34d0116406808deab3c9dabab044ea5d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,280 @@
+---
+title: TiM
+emoji: 🏆
+colorFrom: blue
+colorTo: red
+sdk: gradio
+sdk_version: 5.44.1
+app_file: app.py
+pinned: false
+---
+
+
+# Transition Models: Rethinking the Generative Learning Objective
+
+Highlights: We propose Transition Models (TiM), a novel generative model that learns to navigate the entire generative trajectory with unprecedented flexibility.
+* Our Transition Models (TiM) are trained to master arbitrary state-to-state transitions. This approach allows TiM to learn the entire solution manifold of the generative process, unifying the few-step and many-step regimes within a single, powerful model.
+ 
+* Despite having only 865M parameters, TiM achieves state-of-the-art performance, surpassing leading models such as SD3.5 (8B parameters) and FLUX.1 (12B parameters) across all evaluated step counts on the GenEval benchmark. Importantly, unlike previous few-step generators, TiM shows monotonic quality improvement as the sampling budget increases.
+ 
+* Additionally, when employing our native-resolution strategy, TiM delivers exceptional fidelity at resolutions up to $4096\times4096$.
+ 
+
+
+## 🚨 News
+
+- `2025-9-5`: We are delighted to introduce TiM, the first text-to-image generator to support any-step generation, trained entirely from scratch. We have released the code and pretrained models of TiM.
+
+
+
+## 1. Setup
+
+First, clone the repo:
+```bash
+git clone https://github.com/WZDTHU/TiM.git && cd TiM
+```
+
+### 1.1 Environment Setup
+
+```bash
+conda create -n tim_env python=3.10
+conda activate tim_env
+pip install torch==2.5.1 torchvision==0.20.1 --index-url https://download.pytorch.org/whl/cu118
+pip install flash-attn
+pip install -r requirements.txt
+pip install -e .
+```
+
+
+### 1.2 Model Zoo (WIP)
+
+
+#### Text-to-Image Generation
+
+A single TiM model can perform any-step generation (one-step, few-step, and multi-step) and demonstrates monotonic quality improvement as the sampling budget increases.
+| Model | Model Zoo | Model Size | VAE | 1-NFE GenEval | 8-NFE GenEval | 128-NFE GenEval |
+|---------------|------------|---------|------------|-------|-------|-------|
+| TiM-T2I | [🤗 HF](https://huggingface.co/GoodEnough/TiM-T2I/blob/main/t2i_model.bin) | 865M | [DC-AE](https://huggingface.co/mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers) | 0.67 | 0.76 | 0.83 |
+
+
+
+```bash
+mkdir checkpoints
+wget -c "https://huggingface.co/GoodEnough/TiM-T2I/resolve/main/t2i_model.bin" -O checkpoints/t2i_model.bin
+```
+
+
+#### Class-guided Image Generation:
+
+| Model | Model Zoo | Model Size | VAE | 2-NFE FID | 500-NFE FID |
+|---------------|------------|---------|------------|------------|------------|
+| TiM-C2I-256 | [🤗 HF](https://huggingface.co/GoodEnough/TiM-C2I/blob/main/c2i_model_256.safetensors) | 664M | [SD-VAE](https://huggingface.co/stabilityai/sd-vae-ft-ema) | 6.14 | 1.65
+| TiM-C2I-512 | [🤗 HF](https://huggingface.co/GoodEnough/TiM-C2I/blob/main/c2i_model_512.safetensors) | 664M | [DC-AE](https://huggingface.co/mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers) | 4.79 | 1.69
+
+
+```bash
+mkdir checkpoints
+wget -c "https://huggingface.co/GoodEnough/TiM-C2I/resolve/main/c2i_model_256.safetensors" -O checkpoints/c2i_model_256.safetensors
+wget -c "https://huggingface.co/GoodEnough/TiM-C2I/resolve/main/c2i_model_512.safetensors" -O checkpoints/c2i_model_512.safetensors
+```
+
+
+## 2. Sampling
+
+#### Text-to-Image Generation
+
+We provide sampling scripts for three benchmarks: GenEval, DPGBench, and MJHQ30K. You can specify the sampling steps, resolution, and CFG scale in the corresponding scripts.
+
+Sampling with the TiM-T2I model on the GenEval benchmark:
+```bash
+bash scripts/sample/t2i/sample_t2i_geneval.sh
+```
+
+Sampling with the TiM-T2I model on the DPGBench benchmark:
+```bash
+bash scripts/sample/t2i/sample_t2i_dpgbench.sh
+```
+
+Sampling with the TiM-T2I model on the MJHQ30K benchmark:
+```bash
+bash scripts/sample/t2i/sample_t2i_mjhq30k.sh
+```
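+
+Beyond the benchmark scripts, the following is a minimal sketch of programmatic text-to-image sampling. It mirrors the component loading and `scheduler.sample` call used in `app.py`; the prompt, resolution, `num_steps`, and `cfg_scale` below are illustrative and can be changed freely:
+```python
+import functools
+import torch
+from PIL import Image
+from omegaconf import OmegaConf
+from tim.schedulers.transition import TransitionSchedule
+from tim.utils.misc_utils import instantiate_from_config, init_from_ckpt
+from tim.models.vae import get_dc_ae, dc_ae_decode
+from tim.models.utils.text_encoders import load_text_encoder, encode_prompt
+
+device, dtype = "cuda", torch.bfloat16
+cfg = OmegaConf.load("configs/t2i/tim_xl_p1_t2i.yaml").model
+
+# VAE, text encoder, TiM network, and transition scheduler (same calls as app.py)
+dc_ae = get_dc_ae(cfg.vae_dir, dtype=torch.float32, device=device)
+decode = functools.partial(dc_ae_decode, dc_ae, slice_vae=True)
+text_encoder, tokenizer = load_text_encoder(
+    text_encoder_dir=cfg.text_encoder_dir, device=device, weight_dtype=dtype
+)
+model = instantiate_from_config(cfg.network).to(device=device, dtype=dtype)
+init_from_ckpt(model, checkpoint_dir="checkpoints/t2i_model.bin", ignore_keys=None, verbose=True)
+model.eval()
+scheduler = TransitionSchedule(
+    transport=instantiate_from_config(cfg.transport),
+    **OmegaConf.to_container(cfg.transition_loss),
+)
+
+# Prompt and null-prompt features for classifier-free guidance
+y, mask = encode_prompt(
+    tokenizer, text_encoder, device, dtype,
+    ["a tiny astronaut hatching from an egg on the moon"],
+    cfg.use_last_hidden_state, max_seq_length=cfg.max_seq_length,
+)
+y_null, _ = encode_prompt(
+    tokenizer, text_encoder, device, dtype, [""],
+    cfg.use_last_hidden_state, max_seq_length=cfg.max_seq_length,
+)
+seq_len = mask.sum(dim=-1).max()
+y, y_null = y[:, :seq_len], y_null[:, :seq_len]
+
+# A 1024x1024 image corresponds to a 32x32 latent (DC-AE downsamples by 32x)
+z = torch.randn((1, model.in_channels, 32, 32), device=device, dtype=dtype)
+with torch.no_grad():
+    samples = scheduler.sample(
+        model, y, y_null, z, T_max=1.0, T_min=0.0,
+        num_steps=8,  # any-step: 1, 8, or 128 steps all use the same weights
+        cfg_scale=2.5, cfg_low=0.0, cfg_high=1.0,
+        stochasticity_ratio=0.0, sample_type="transition",
+        step_callback=lambda step: None,
+    )[-1]
+
+# Decode latents and save the image
+images = decode(samples.to(torch.float32))
+images = torch.clamp(127.5 * images + 128.0, 0, 255).permute(0, 2, 3, 1).to(torch.uint8)
+Image.fromarray(images[0].cpu().numpy()).save("sample.png")
+```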
+
+#### Class-guided Image Generation
+
+We provide the sampling scripts for ImageNet-256 and ImageNet-512.
+
+Sampling with the C2I model at $256\times256$ resolution:
+```bash
+bash scripts/sample/c2i/sample_256x256.sh
+```
+
+Sampling with the C2I model at $512\times512$ resolution:
+```bash
+bash scripts/sample/c2i/sample_512x512.sh
+```
+
+
+## 3. Evaluation
+
+
+### Text-to-Image Generation
+
+#### GenEval
+
+Please follow the [GenEval](https://github.com/djghosh13/geneval) instructions to set up its conda environment.
+
+Given the directory of generated images `SAMPLING_DIR` and the object-detector folder `OBJECT_DETECTOR_FOLDER`, run the following command:
+```bash
+python projects/evaluate/geneval/evaluation/evaluate_images.py $SAMPLING_DIR --outfile geneval_results.jsonl --model-path $OBJECT_DETECTOR_FOLDER
+```
+This produces a JSONL file with one line per image. Run the following command to obtain the GenEval score:
+```bash
+python projects/evaluate/geneval/evaluation/summary_scores.py geneval_results.jsonl
+```
+
+
+#### DPGBench
+Please follow the [DPGBench](https://github.com/TencentQQGYLab/ELLA) instructions to set up its conda environment.
+Given the directory of generated images `SAMPLING_DIR`, run the following command:
+```bash
+python projects/evaluate/dpg_bench/compute_dpg_bench.py --image-root-path $SAMPLING_DIR --res-path dpgbench_results.txt --pic-num 4
+```
+
+#### MJHQ30K
+Please download [MJHQ30K](https://huggingface.co/datasets/playgroundai/MJHQ-30K) as the reference images.
+
+
+Given the reference-image directory `REFERENCE_DIR` and the directory of generated images `SAMPLING_DIR`, run the following command to calculate the FID score:
+```bash
+python projects/evaluate/mjhq30k/calculate_fid.py $REFERENCE_DIR $SAMPLING_DIR
+```
+
+For the CLIP score, first compute the text features and save them in `MJHQ30K_TEXT_FEAT`:
+```bash
+python projects/evaluate/mjhq30k/calculate_clip.py projects/evaluate/mjhq30k/meta_data.json $MJHQ30K_TEXT_FEAT/clip_feat.safetensors --save-stats
+```
+Then run the following command to calculate the CLIP score:
+```bash
+python projects/evaluate/mjhq30k/calculate_clip.py $MJHQ30K_TEXT_FEAT/clip_feat.safetensors $SAMPLING_DIR
+```
+
+
+
+### Class-guided Image Generation
+
+Sampling produces a folder of images, from which FID, Inception Score, and other metrics are computed.
+Note that we do not pack the generated samples into a `.npz` file; this does not affect the calculation of FID and the other metrics.
+Please follow [ADM's TensorFlow evaluation suite](https://github.com/openai/guided-diffusion/tree/main/evaluations)
+to set up the conda environment and download the reference batch.
+
+```bash
+wget -c "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb" -O checkpoints/classify_image_graph_def.pb
+```
+
+
+Given the directory of the reference batch `REFERENCE_DIR` and the directory of generated images `SAMPLING_DIR`, run the following command:
+```bash
+python projects/evaluate/adm_evaluator.py $REFERENCE_DIR $SAMPLING_DIR
+```
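+
+If you prefer to pack the generated samples into ADM's `.npz` batch format yourself, the sketch below is one straightforward way to do it (the folder path and `.png` naming are illustrative):
+```python
+import os
+import numpy as np
+from PIL import Image
+
+sampling_dir = "path/to/SAMPLING_DIR"  # folder of generated samples
+arrays = [
+    np.array(Image.open(os.path.join(sampling_dir, name)).convert("RGB"))
+    for name in sorted(os.listdir(sampling_dir))
+    if name.endswith(".png")
+]
+batch = np.stack(arrays)  # (N, H, W, 3) uint8
+np.savez("samples.npz", batch)  # stored under the default "arr_0" key
+```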
+
+
+
+
+
+## 4. Training
+
+### 4.1 Dataset Setup
+
+Currently, we provide the full [preprocessed dataset](https://huggingface.co/datasets/GoodEnough/NiT-Preprocessed-ImageNet1K) for ImageNet1K. Please use the following commands to download the preprocessed latents.
+
+```bash
+bash tools/download_imagenet_256x256.sh
+bash tools/download_imagenet_512x512.sh
+```
+
+For text-to-image generation, we provide a [toy dataset](https://huggingface.co/datasets/GoodEnough/TiM-Toy-T2I-Dataset). Please use the following command to download this dataset.
+```bash
+bash tools/download_toy_t2i_dataset.sh
+```
+
+
+### 4.2 Download Image Encoder
+
+We use RADIO-v2.5-b as the image encoder for the REPA loss.
+
+```bash
+wget -c "https://huggingface.co/nvidia/RADIO/resolve/main/radio-v2.5-b_half.pth.tar" -O checkpoints/radio-v2.5-b_half.pth.tar
+```
+
+
+### 4.3 Training Scripts
+
+Specify the `image_dir` in `configs/c2i/tim_b_p4.yaml` and train the base-model (131M) on ImageNet-256:
+```bash
+bash scripts/train/c2i/train_tim_c2i_b.sh
+```
+
+Specify the `image_dir` in `configs/c2i/tim_xl_p2_256.yaml` and train the XL-model (664M) on ImageNet-256:
+```bash
+bash scripts/train/c2i/train_tim_c2i_xl_256.sh
+```
+
+Specify the `image_dir` in `configs/c2i/tim_xl_p2_512.yaml` and train the XL-model (664M) on ImageNet-512:
+```bash
+bash scripts/train/c2i/train_tim_c2i_xl_512.sh
+```
+
+Specify the `root_dir` in `configs/t2i/tim_xl_p1_t2i.yaml` and train the T2I-model (865M) on Toy-T2I-Dataset:
+```bash
+bash scripts/train/t2i/train_tim_t2i.sh
+```
+
+
+
+
+## Citations
+If you find this project useful, please cite:
+```bibtex
+@article{wang2025transition,
+ title={Transition Models: Rethinking the Generative Learning Objective},
+ author={Wang, Zidong and Zhang, Yiyuan and Yue, Xiaoyu and Yue, Xiangyu and Li, Yangguang and Ouyang, Wanli and Bai, Lei},
+ year={2025},
+ eprint={2509.04394},
+ archivePrefix={arXiv},
+ primaryClass={cs.LG}
+}
+```
+Paper: https://arxiv.org/abs/2509.04394
+
+## License
+This project is licensed under the Apache-2.0 license.
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca8f8355529ab32775e2b26dd1bd8580e6c3054
--- /dev/null
+++ b/app.py
@@ -0,0 +1,343 @@
+import gradio as gr
+import spaces # type: ignore - ZeroGPU spaces library
+import numpy as np
+import random
+import torch
+import functools
+from pathlib import Path
+from PIL import Image
+from omegaconf import OmegaConf # type: ignore - YAML configuration library
+from tim.schedulers.transition import TransitionSchedule
+from tim.utils.misc_utils import instantiate_from_config, init_from_ckpt
+from tim.models.vae import get_sd_vae, get_dc_ae, sd_vae_decode, dc_ae_decode
+from tim.models.utils.text_encoders import load_text_encoder, encode_prompt
+
+# Configuration
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 2048
+
+# Global variables to store loaded components
+model = None
+scheduler = None
+text_encoder = None
+tokenizer = None
+decode_func = None
+null_cap_feat = None
+null_cap_mask = None
+config = None
+
+
+def load_model_components(device: str = "cuda"):
+ """Load all model components once at startup"""
+ global \
+ model, \
+ scheduler, \
+ text_encoder, \
+ tokenizer, \
+ decode_func, \
+ null_cap_feat, \
+ null_cap_mask, \
+ config
+
+ try:
+ # Load configuration
+ config_path = "configs/t2i/tim_xl_p1_t2i.yaml"
+ ckpt_path = "checkpoints/t2i_model.bin"
+
+ if not Path(config_path).exists():
+ raise FileNotFoundError(f"Config file not found: {config_path}")
+ if not Path(ckpt_path).exists():
+ raise FileNotFoundError(f"Checkpoint file not found: {ckpt_path}")
+
+ print("Loading configuration...")
+ config = OmegaConf.load(config_path)
+ model_config = config.model
+
+ print("Loading VAE...")
+ # Load VAE
+ if "dc-ae" in model_config.vae_dir:
+ dc_ae = get_dc_ae(model_config.vae_dir, dtype=torch.float32, device=device)
+ dc_ae.enable_tiling(2560, 2560, 2560, 2560)
+ decode_func = functools.partial(dc_ae_decode, dc_ae, slice_vae=True)
+ elif "sd-vae" in model_config.vae_dir:
+ sd_vae = get_sd_vae(
+ model_config.vae_dir, dtype=torch.float32, device=device
+ )
+ decode_func = functools.partial(sd_vae_decode, sd_vae, slice_vae=True)
+ else:
+ raise ValueError("Unsupported VAE type")
+
+ print("Loading text encoder...")
+ # Load text encoder
+ text_encoder, tokenizer = load_text_encoder(
+ text_encoder_dir=model_config.text_encoder_dir,
+ device=device,
+ weight_dtype=torch.bfloat16,
+ )
+
+ print("Encoding null caption...")
+ # Get null caption features
+ null_cap_feat, null_cap_mask = encode_prompt(
+ tokenizer,
+ text_encoder,
+ device,
+ torch.bfloat16,
+ [""],
+ model_config.use_last_hidden_state,
+ max_seq_length=model_config.max_seq_length,
+ )
+
+ print("Loading main model...")
+ # Load main model
+ model = instantiate_from_config(model_config.network).to(
+ device=device, dtype=dtype
+ )
+ init_from_ckpt(model, checkpoint_dir=ckpt_path, ignore_keys=None, verbose=True)
+ model.eval()
+
+ print("Loading scheduler...")
+ # Load scheduler
+ transport = instantiate_from_config(model_config.transport)
+ scheduler = TransitionSchedule(
+ transport=transport, **OmegaConf.to_container(model_config.transition_loss)
+ )
+
+ print("All components loaded successfully!")
+
+ except Exception as e:
+ print(f"Error loading model components: {e}")
+ raise e
+
+
+@spaces.GPU(duration=60)
+def generate_image(
+ prompt,
+ seed=42,
+ randomize_seed=False,
+ width=1024,
+ height=1024,
+ guidance_scale=2.5,
+ num_inference_steps=16,
+ progress=gr.Progress(track_tqdm=True),
+):
+ """Generate image from text prompt"""
+ try:
+ # Validate inputs
+ if not prompt or len(prompt.strip()) == 0:
+ raise ValueError("Please enter a valid prompt")
+
+ if model is None or scheduler is None:
+ raise RuntimeError("Model components not loaded. Please check the setup.")
+
+ # Validate dimensions
+ if (
+ width < 256
+ or width > MAX_IMAGE_SIZE
+ or height < 256
+ or height > MAX_IMAGE_SIZE
+ ):
+ raise ValueError(
+ f"Image dimensions must be between 256 and {MAX_IMAGE_SIZE}"
+ )
+
+ if width % 32 != 0 or height % 32 != 0:
+ raise ValueError("Image dimensions must be divisible by 32")
+
+ if randomize_seed:
+ seed = random.randint(0, MAX_SEED)
+
+ generator = torch.Generator(device=device).manual_seed(seed)
+
+ # Calculate latent dimensions
+ spatial_downsample = 32 if "dc-ae" in config.model.vae_dir else 8
+ latent_h = int(height / spatial_downsample)
+ latent_w = int(width / spatial_downsample)
+
+ progress(0.1, desc="Generating random latent...")
+
+ # Generate random latent
+ z = torch.randn(
+ (1, model.in_channels, latent_h, latent_w),
+ device=device,
+ dtype=dtype,
+ generator=generator,
+ )
+
+ progress(0.1, desc="Encoding prompt...")
+
+ # Encode prompt
+ cap_features, cap_mask = encode_prompt(
+ tokenizer,
+ text_encoder,
+ device,
+ dtype,
+ [prompt],
+ config.model.use_last_hidden_state,
+ max_seq_length=config.model.max_seq_length,
+ )
+
+ cur_max_seq_len = cap_mask.sum(dim=-1).max()
+ y = cap_features[:, :cur_max_seq_len]
+
+ y_null = null_cap_feat[:, :cur_max_seq_len]
+ y_null = y_null.expand(y.shape[0], cur_max_seq_len, null_cap_feat.shape[-1])
+
+ # Generate image
+ with torch.no_grad():
+ samples = scheduler.sample(
+ model,
+ y,
+ y_null,
+ z,
+ T_max=1.0,
+ T_min=0.0,
+ num_steps=num_inference_steps,
+ cfg_scale=guidance_scale,
+ cfg_low=0.0,
+ cfg_high=1.0,
+ stochasticity_ratio=0.0,
+ sample_type="transition",
+ step_callback=lambda step: progress(
+ 0.1 + 0.9 * (step / num_inference_steps), desc="Generating image..."
+ ),
+ )[-1]
+ samples = samples.to(torch.float32)
+
+ # Decode to image
+ images = decode_func(samples)
+ images = (
+ torch.clamp(127.5 * images + 128.0, 0, 255)
+ .permute(0, 2, 3, 1)
+ .to(torch.uint8)
+ .contiguous()
+ )
+ image = Image.fromarray(images[0].cpu().numpy())
+
+ progress(1.0, desc="Complete!")
+
+ return image, seed
+
+ except Exception as e:
+ print(f"Error during image generation: {e}")
+ # Return a placeholder image or error message
+ error_img = Image.new("RGB", (512, 512), color="red")
+ return error_img, seed
+
+
+# Example prompts
+examples = [
+ ["a tiny astronaut hatching from an egg on the moon"],
+ ["🐶 Wearing 🕶 flying on the 🌈"],
+ ["an anime illustration of a wiener schnitzel"],
+ ["a photorealistic landscape of mountains at sunset"],
+ ["a majestic lion in a golden savanna at sunset"],
+ ["a futuristic city with flying cars and neon lights"],
+ ["a cozy cabin in a snowy forest with smoke coming from the chimney"],
+ ["a beautiful mermaid swimming in crystal clear water"],
+]
+
+# CSS styling
+css = """
+#col-container {
+ margin: 0 auto;
+ max-width: 520px;
+}
+"""
+
+# Initialize model components
+try:
+ load_model_components(device)
+ print("Model components loaded successfully!")
+except Exception as e:
+ print(f"Error loading model components: {e}")
+ print("Please ensure config and checkpoint files are available")
+
+# Create Gradio interface
+with gr.Blocks(css=css) as demo:
+ with gr.Column(elem_id="col-container"):
+ gr.Markdown("# TiM Text-to-Image Generator")
+ gr.Markdown(
+ "Generate high-quality images from text prompts using the TiM (Transition in Matching) model"
+ )
+
+ with gr.Row():
+ prompt = gr.Text(
+ label="Prompt",
+ show_label=False,
+ max_lines=1,
+ placeholder="Enter your prompt",
+ container=False,
+ )
+ run_button = gr.Button("Generate", scale=0)
+
+ result = gr.Image(label="Result", show_label=False)
+
+ with gr.Accordion("Advanced Settings", open=False):
+ seed = gr.Slider(
+ label="Seed",
+ minimum=0,
+ maximum=MAX_SEED,
+ step=1,
+ value=0,
+ )
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+ with gr.Row():
+ width = gr.Slider(
+ label="Width",
+ minimum=256,
+ maximum=MAX_IMAGE_SIZE,
+ step=32,
+ value=1024,
+ )
+ height = gr.Slider(
+ label="Height",
+ minimum=256,
+ maximum=MAX_IMAGE_SIZE,
+ step=32,
+ value=1024,
+ )
+
+ with gr.Row():
+ guidance_scale = gr.Slider(
+ label="Guidance Scale",
+ minimum=1,
+ maximum=15,
+ step=0.1,
+ value=2.5,
+ )
+ num_inference_steps = gr.Slider(
+ label="Number of inference steps",
+ minimum=1,
+ maximum=50,
+ step=1,
+ value=16,
+ )
+
+ gr.Examples(
+ examples=examples,
+ fn=generate_image,
+ inputs=[prompt],
+ outputs=[result, seed],
+ cache_examples="lazy",
+ )
+
+ gr.on(
+ triggers=[run_button.click, prompt.submit],
+ fn=generate_image,
+ inputs=[
+ prompt,
+ seed,
+ randomize_seed,
+ width,
+ height,
+ guidance_scale,
+ num_inference_steps,
+ ],
+ outputs=[result, seed],
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/configs/c2i/tim_b_p4.yaml b/configs/c2i/tim_b_p4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f63c343187d1ac8bf791f3a8b80ceb4fd64e2f1
--- /dev/null
+++ b/configs/c2i/tim_b_p4.yaml
@@ -0,0 +1,78 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: -0.4
+ P_std: 1.0
+ sigma_d: 1.0
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.c2i.tim_model.TiM
+ params:
+ input_size: 32
+ patch_size: 4
+ in_channels: 4
+ class_dropout_prob: 0.1
+ num_classes: 1000
+ depth: 12
+ hidden_size: 768
+ num_heads: 12
+ encoder_depth: 4
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 256
+ # pretrained_vae:
+ vae_dir: stabilityai/sd-vae-ft-ema
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: latent
+ dataset:
+ latent_dir: datasets/imagenet1k/sd-vae-ft-ema-256x256
+ image_dir: datasets/imagenet1k/images/train
+ image_size: 256
+ dataloader:
+ num_workers: 16
+ batch_size: 256 # Batch size (per device) for the training dataloader.
+
+
+
+training:
+ tracker: null
+ max_train_steps: 100000
+ checkpointing_steps: 2000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 256
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 200000, 300000]
diff --git a/configs/c2i/tim_xl_p1_512.yaml b/configs/c2i/tim_xl_p1_512.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2aaf1e9391a9c4b106586bb44a8be7717025af52
--- /dev/null
+++ b/configs/c2i/tim_xl_p1_512.yaml
@@ -0,0 +1,85 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: -0.4
+ P_std: 1.0
+ sigma_d: 1.0
+ T_max: 1.0
+ T_min: 0.0
+ enhance_target: False
+ w_gt: 1.0
+ w_cond: 0.0
+ w_start: 0.0
+ w_end: 0.0
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.c2i.tim_model.TiM
+ params:
+ input_size: 16
+ patch_size: 1
+ in_channels: 32
+ class_dropout_prob: 0.1
+ num_classes: 1000
+ depth: 28
+ hidden_size: 1152
+ num_heads: 16
+ encoder_depth: 8
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 384
+ # pretrained_vae:
+ vae_dir: mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: latent
+ dataset:
+ latent_dir: datasets/imagenet1k/dc-ae-f32c32-sana-1.1-diffusers-512x512
+ image_dir: datasets/imagenet1k/images/train
+ image_size: 512
+ dataloader:
+ num_workers: 4
+ batch_size: 64 # Batch size (per device) for the training dataloader.
+
+
+
+training:
+ tracker: null
+ max_train_steps: 750000
+ checkpointing_steps: 2000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 256
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 250000, 500000]
diff --git a/configs/c2i/tim_xl_p1_512_mg.yaml b/configs/c2i/tim_xl_p1_512_mg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..29df3e40c86a865afc36aa585aa506f57079e9e2
--- /dev/null
+++ b/configs/c2i/tim_xl_p1_512_mg.yaml
@@ -0,0 +1,85 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: -0.4
+ P_std: 1.0
+ sigma_d: 1.0
+ T_max: 1.0
+ T_min: 0.0
+ enhance_target: True
+ w_gt: 1.0
+ w_cond: 0.75
+ w_start: 0.3
+ w_end: 0.8
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.c2i.tim_model.TiM
+ params:
+ input_size: 16
+ patch_size: 1
+ in_channels: 32
+ class_dropout_prob: 0.1
+ num_classes: 1000
+ depth: 28
+ hidden_size: 1152
+ num_heads: 16
+ encoder_depth: 8
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 384
+ # pretrained_vae:
+ vae_dir: mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: latent
+ dataset:
+ latent_dir: datasets/imagenet1k/dc-ae-f32c32-sana-1.1-diffusers-512x512
+ image_dir: datasets/imagenet1k/images/train
+ image_size: 512
+ dataloader:
+ num_workers: 4
+ batch_size: 64 # Batch size (per device) for the training dataloader.
+
+
+
+training:
+ tracker: null
+ max_train_steps: 750000
+ checkpointing_steps: 2000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 256
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 250000, 500000]
diff --git a/configs/c2i/tim_xl_p2_256.yaml b/configs/c2i/tim_xl_p2_256.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..96502104cd8497467941c893f0738f49aef7e767
--- /dev/null
+++ b/configs/c2i/tim_xl_p2_256.yaml
@@ -0,0 +1,85 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: -0.4
+ P_std: 1.0
+ sigma_d: 1.0
+ T_max: 1.0
+ T_min: 0.0
+ enhance_target: False
+ w_gt: 1.0
+ w_cond: 0.0
+ w_start: 0.0
+ w_end: 0.0
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.c2i.tim_model.TiM
+ params:
+ input_size: 32
+ patch_size: 2
+ in_channels: 4
+ class_dropout_prob: 0.1
+ num_classes: 1000
+ depth: 28
+ hidden_size: 1152
+ num_heads: 16
+ encoder_depth: 8
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 384
+ # pretrained_vae:
+ vae_dir: stabilityai/sd-vae-ft-ema
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: latent
+ dataset:
+ latent_dir: datasets/imagenet1k/sd-vae-ft-ema-256x256
+ image_dir: datasets/imagenet1k/images/train
+ image_size: 256
+ dataloader:
+ num_workers: 4
+ batch_size: 64 # Batch size (per device) for the training dataloader.
+
+
+
+training:
+ tracker: null
+ max_train_steps: 750000
+ checkpointing_steps: 2000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 256
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 250000, 500000]
diff --git a/configs/c2i/tim_xl_p2_256_mg.yaml b/configs/c2i/tim_xl_p2_256_mg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00bfb806aad1413c71b838905cf33997bf17ca16
--- /dev/null
+++ b/configs/c2i/tim_xl_p2_256_mg.yaml
@@ -0,0 +1,85 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: -0.4
+ P_std: 1.0
+ sigma_d: 1.0
+ T_max: 1.0
+ T_min: 0.0
+ enhance_target: True
+ w_gt: 1.0
+ w_cond: 0.75
+ w_start: 0.3
+ w_end: 0.8
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.c2i.tim_model.TiM
+ params:
+ input_size: 32
+ patch_size: 2
+ in_channels: 4
+ class_dropout_prob: 0.1
+ num_classes: 1000
+ depth: 28
+ hidden_size: 1152
+ num_heads: 16
+ encoder_depth: 8
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 384
+ # pretrained_vae:
+ vae_dir: stabilityai/sd-vae-ft-ema
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: latent
+ dataset:
+ latent_dir: datasets/imagenet1k/sd-vae-ft-ema-256x256
+ image_dir: datasets/imagenet1k/images/train
+ image_size: 256
+ dataloader:
+ num_workers: 4
+ batch_size: 64 # Batch size (per device) for the training dataloader.
+
+
+
+training:
+ tracker: null
+ max_train_steps: 750000
+ checkpointing_steps: 2000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 256
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 250000, 500000]
diff --git a/configs/t2i/tim_xl_p1_t2i.yaml b/configs/t2i/tim_xl_p1_t2i.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3f3e7cab246a4d72af86b824473c59ccddc16910
--- /dev/null
+++ b/configs/t2i/tim_xl_p1_t2i.yaml
@@ -0,0 +1,81 @@
+model:
+ transport:
+ target: tim.schedulers.transports.OT_FM
+ params:
+ P_mean: 0.0
+ P_std: 1.6
+ sigma_d: 1.0
+ transition_loss:
+ diffusion_ratio: 0.5
+ consistency_ratio: 0.1
+ derivative_type: dde
+ differential_epsilon: 0.005
+ weight_time_type: sqrt
+ weight_time_tangent: True
+ network:
+ target: tim.models.t2i.tim_model.TiM
+ params:
+ input_size: 16
+ patch_size: 1
+ in_channels: 32
+ depth: 28
+ hidden_size: 1152
+ cap_feat_dim: 1152
+ num_heads: 16
+ encoder_depth: 8
+ qk_norm: True
+ z_dim: 768
+ new_condition: t-r
+ use_new_embed: True
+ distance_aware: True
+ lora_hidden_size: 384
+ # pretrained_vae:
+ vae_dir: mit-han-lab/dc-ae-f32c32-sana-1.1-diffusers
+ # text encoder
+ text_encoder_dir: google/gemma-3-1b-it
+ proportion_empty_prompts: 0.1
+ use_last_hidden_state: True
+ max_seq_length: 256
+ # repa encoder
+ enc_dir: checkpoints/radio/radio-v2.5-b_half.pth.tar
+ proj_coeff: 1.0
+ # ema
+ use_ema: True
+ ema_decay: 0.9999
+
+data:
+ data_type: image_ms
+ dataset:
+ root_dir: datasets/t2i_toy_dataset
+ packed_json: datasets/t2i_toy_dataset/bucket_sampler.json
+ jsonl_dir: datasets/t2i_toy_dataset/data_info.jsonl
+ dataloader:
+ num_workers: 4
+ batch_size: 128 # Batch size (per device) for the training dataloader.
+
+
+training:
+ tracker: null
+ max_train_steps: 500000
+ checkpointing_steps: 1000
+ checkpoints_total_limit: 2
+ resume_from_checkpoint: latest
+ learning_rate: 1.0e-4
+ learning_rate_base_batch_size: 512
+ scale_lr: True
+  lr_scheduler: constant # ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]
+ lr_warmup_steps: 0
+ gradient_accumulation_steps: 1
+ optimizer:
+ target: torch.optim.AdamW
+ params:
+ # betas: ${tuple:0.9, 0.999}
+ betas: [0.9, 0.95]
+ weight_decay: 1.0e-2
+ eps: 1.0e-6
+ max_grad_norm: 1.0
+ proportion_empty_prompts: 0.0
+ mixed_precision: bf16 # ["no", "fp16", "bf16"]
+ allow_tf32: True
+ validation_steps: 500
+ checkpoint_list: [100000, 200000, 300000, 400000]
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..590e87450429860f3a226cdd663062c771b2ba83
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,31 @@
+[project]
+name = "tim"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+ "accelerate>=0.33.0",
+ "bitsandbytes>=0.47.0",
+ "diffusers==0.33.1",
+ "einops>=0.8.1",
+ "flash-attn>=2.8.3",
+ "gradio>=5.44.1",
+ "imageio==2.34.2",
+ "imageio-ffmpeg==0.5.1",
+ "moviepy==1.0.3",
+ "numpy==1.26.0",
+ "omegaconf>=2.3.0",
+ "pillow==9.5.0",
+ "safetensors>=0.6.2",
+ "sentencepiece>=0.2.0",
+ "spaces>=0.40.1",
+ "streamlit>=1.38.0",
+ "timm>=1.0.19",
+ "torch>=2.8.0",
+ "torchdiffeq>=0.2.5",
+ "torchvision>=0.23.0",
+ "transformers>=4.44.2",
+ "triton>=3.4.0",
+ "wandb>=0.21.3",
+]
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bdaef997e8f4cf83d27f69cd3820d7046951cba9
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,15 @@
+gradio>=4.0.0
+spaces>=0.28.0
+torch>=2.1.0
+torchvision
+diffusers
+transformers>=4.25.0
+omegaconf
+einops
+numpy
+Pillow
+safetensors
+tqdm
+flash-attn>=2.0.0
+accelerate
+-e .
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..43ce2341187f7789b155aef8903b60d703d115a5
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,12 @@
+from setuptools import find_packages, setup
+
+setup(
+ name="tim",
+ version="0.0.1",
+ description="",
+ packages=find_packages(),
+ install_requires=[
+ "torch",
+ "numpy",
+ ],
+)
diff --git a/tim/data/c2i_data.py b/tim/data/c2i_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeac25cabee65ad8b466760e41de478729fd78df
--- /dev/null
+++ b/tim/data/c2i_data.py
@@ -0,0 +1,150 @@
+import os
+import json
+import datetime
+import torchvision
+import numpy as np
+import torch
+
+from omegaconf import OmegaConf
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision.datasets import ImageFolder
+from torchvision import transforms
+from torchvision.transforms.functional import hflip
+from accelerate.logging import get_logger
+from safetensors.torch import load_file
+from .sampler_utils import get_train_sampler
+
+
+logger = get_logger(__name__, log_level="INFO")
+
+
+def center_crop_arr(pil_image, image_size):
+ """
+ Center cropping implementation from ADM.
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
+ """
+ while min(*pil_image.size) >= 2 * image_size:
+ pil_image = pil_image.resize(
+ tuple(x // 2 for x in pil_image.size), resample=Image.Resampling.BOX
+ )
+
+ scale = image_size / min(*pil_image.size)
+ pil_image = pil_image.resize(
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.Resampling.BICUBIC
+ )
+
+ arr = np.array(pil_image)
+ crop_y = (arr.shape[0] - image_size) // 2
+ crop_x = (arr.shape[1] - image_size) // 2
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
+
+class ImagenetDictWrapper(Dataset):
+ def __init__(self, dataset):
+ super().__init__()
+ self.dataset = dataset
+
+ def __getitem__(self, i):
+ x, y = self.dataset[i]
+ return {"image": x, "label": y}
+
+ def __len__(self):
+ return len(self.dataset)
+
+class ImagenetLatentDataset(Dataset):
+ def __init__(self, latent_dir, image_dir, image_size):
+ super().__init__()
+ self.RandomHorizontalFlipProb = 0.5
+ self.transform = transforms.Compose([
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, image_size)),
+ transforms.Lambda(lambda pil_image: (pil_image, hflip(pil_image))),
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
+ ])
+
+ self.dataset = []
+ for class_folder in os.listdir(image_dir):
+ if os.path.isfile(os.path.join(image_dir, class_folder)):
+ continue
+ latent_class_folder = os.path.join(latent_dir, class_folder)
+ image_class_folder = os.path.join(image_dir, class_folder)
+ for file in os.listdir(image_class_folder):
+ self.dataset.append(
+ dict(
+ latent=os.path.join(latent_class_folder, file.split('.')[0]+'.safetensors'),
+ image=os.path.join(image_class_folder, file)
+ )
+ )
+
+ def __len__(self):
+ return len(self.dataset)
+
+ def __getitem__(self, idx):
+ data_item = dict()
+ data = load_file(self.dataset[idx]['latent'])
+ image = self.transform(Image.open(self.dataset[idx]['image']).convert("RGB"))
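+        # the cached latent file stores two views per sample (index 0: original, index 1: horizontal flip), matching the paired transform above; pick the same view for latent and image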
+ if torch.rand(1) < self.RandomHorizontalFlipProb:
+ data_item['latent'] = data['latent'][0]
+ data_item['image'] = image[0]
+ else:
+ data_item['latent'] = data['latent'][1]
+ data_item['image'] = image[1]
+ data_item['label'] = data['label']
+ return data_item
+
+
+
+class C2ILoader():
+ def __init__(self, data_config):
+ super().__init__()
+
+ self.batch_size = data_config.dataloader.batch_size
+ self.num_workers = data_config.dataloader.num_workers
+
+ self.data_type = data_config.data_type
+
+ if data_config.data_type == 'image':
+ self.train_dataset = ImagenetDictWrapper(**OmegaConf.to_container(data_config.dataset))
+ elif data_config.data_type == 'latent':
+ self.train_dataset = ImagenetLatentDataset(**OmegaConf.to_container(data_config.dataset))
+ else:
+ raise NotImplementedError
+
+
+ self.test_dataset = None
+ self.val_dataset = None
+
+ def train_len(self):
+ return len(self.train_dataset)
+
+ def train_dataloader(self, rank, world_size, global_batch_size, max_steps, resume_steps, seed):
+
+ sampler = get_train_sampler(
+ self.train_dataset, rank, world_size, global_batch_size, max_steps, resume_steps, seed
+ )
+ return DataLoader(
+ self.train_dataset,
+ batch_size=self.batch_size,
+ sampler=sampler,
+ num_workers=self.num_workers,
+ pin_memory=True,
+ drop_last=True,
+ prefetch_factor=2,
+ )
+
+ def test_dataloader(self):
+ return None
+
+ def val_dataloader(self):
+ return DataLoader(
+ self.train_dataset,
+ batch_size=self.batch_size,
+            shuffle=False,  # C2ILoader defines no shuffle flag; keep validation order fixed
+ num_workers=self.num_workers,
+ pin_memory=True,
+ drop_last=True
+ )
+
+
+
+
diff --git a/tim/data/sampler_utils.py b/tim/data/sampler_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..12fa594acec20db98522fd70a56ebd6876eec7db
--- /dev/null
+++ b/tim/data/sampler_utils.py
@@ -0,0 +1,52 @@
+import torch
+import json
+
+# from https://github.com/Alpha-VLLM/LLaMA2-Accessory/blob/main/Large-DiT-ImageNet/train.py#L60
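+# get_train_sampler builds a flat, per-rank list of sample indices long enough to cover max_steps batches,
+# reshuffling deterministically each virtual epoch and skipping the indices already consumed before resume_step.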
+def get_train_sampler(dataset, rank, world_size, global_batch_size, max_steps,
+ resume_step, seed):
+ sample_indices = torch.empty([max_steps * global_batch_size // world_size],
+ dtype=torch.long)
+ epoch_id, fill_ptr, offs = 0, 0, 0
+ while fill_ptr < sample_indices.size(0):
+ g = torch.Generator()
+ g.manual_seed(seed + epoch_id)
+ epoch_sample_indices = torch.randperm(len(dataset), generator=g)
+ epoch_id += 1
+ epoch_sample_indices = epoch_sample_indices[
+ (rank + offs) % world_size::world_size
+ ]
+ offs = (offs + world_size - len(dataset) % world_size) % world_size
+ epoch_sample_indices = epoch_sample_indices[
+ :sample_indices.size(0) - fill_ptr
+ ]
+ sample_indices[fill_ptr: fill_ptr + epoch_sample_indices.size(0)] = \
+ epoch_sample_indices
+ fill_ptr += epoch_sample_indices.size(0)
+ return sample_indices[resume_step * global_batch_size // world_size:].tolist()
+
+
+
+
+def get_packed_batch_sampler(
+ dataset, rank, world_size, max_steps, resume_step, seed
+ ):
+ sample_indices = [None for _ in range(max_steps)]
+ epoch_id, fill_ptr, offs = 0, 0, 0
+ while fill_ptr < len(sample_indices):
+ g = torch.Generator()
+ g.manual_seed(seed + epoch_id)
+ epoch_sample_indices = torch.randperm(len(dataset), generator=g)
+ epoch_id += 1
+ epoch_sample_indices = epoch_sample_indices[
+ (rank + offs) % world_size::world_size
+ ]
+ offs = (offs + world_size - len(dataset) % world_size) % world_size
+ epoch_sample_indices = epoch_sample_indices[
+ :len(sample_indices) - fill_ptr
+ ]
+ sample_indices[fill_ptr: fill_ptr + epoch_sample_indices.size(0)] = [
+ dataset[i] for i in epoch_sample_indices
+ ]
+ fill_ptr += epoch_sample_indices.size(0)
+ return sample_indices[resume_step:]
+
diff --git a/tim/data/t2i_data.py b/tim/data/t2i_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f09b515309addd00c5c45df8394c438ab2fc60b
--- /dev/null
+++ b/tim/data/t2i_data.py
@@ -0,0 +1,126 @@
+import torch
+import csv
+import json
+import os
+import random
+import ast
+import numpy as np
+from omegaconf import OmegaConf
+from torchvision import transforms
+from torch.utils.data import DataLoader, Dataset
+from PIL import Image
+from tqdm import tqdm
+from safetensors.torch import save_file, load_file
+from .sampler_utils import get_train_sampler, get_packed_batch_sampler
+
+
+
+def resize_arr(pil_image, height, width):
+ pil_image = pil_image.resize((width, height), resample=Image.Resampling.BICUBIC)
+
+ return pil_image
+
+
+class T2IDatasetMS(Dataset):
+ def __init__(self, root_dir, packed_json, jsonl_dir) -> None:
+ super().__init__()
+ self.root_dir = root_dir
+ self.dataset = []
+ with open(packed_json, 'r') as fp:
+ self.packed_dataset = json.load(fp)
+
+ with open(jsonl_dir, 'r') as fp:
+ self.dataset = [json.loads(line) for line in fp]
+
+
+ def __len__(self):
+ return len(self.dataset)
+
+ def get_one_data(self, data_meta):
+ data_item = dict()
+ image_file = os.path.join(self.root_dir, data_meta['image_file'])
+
+ image = Image.open(image_file).convert("RGB")
+
+ bucket = data_meta['bucket']
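+        # the bucket name ends with the target resolution (e.g. "...-1024x1024"); snap height and width down to multiples of 32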
+ resolutions = bucket.split('-')[-1].split('x')
+ height, width = int(int(resolutions[0])/32)*32, int(int(resolutions[1])/32)*32
+ transform = transforms.Compose([
+ transforms.Lambda(lambda pil_image: resize_arr(pil_image, height, width)),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
+ ])
+ image = transform(image)
+
+ data_item['image'] = image
+ data_item['caption'] = random.choice(data_meta['captions']).encode('unicode-escape').decode('utf-8')
+
+ return data_item
+
+ def __getitem__(self, index):
+ data_meta = self.dataset[index]
+        try:
+            data_item = self.get_one_data(data_meta)
+        except Exception:
+            print(f"Warning: failed to load {data_meta['image_file']}", flush=True)
+            data_item = None
+
+ return data_item
+
+
+
+def bucket_collate_fn(batch):
+ caption = []
+ image = []
+ for data in batch:
+        if data is None:
+ continue
+ caption.append(data['caption'])
+ image.append(data['image'])
+ image = torch.stack(image)
+ return dict(image=image, caption=caption)
+
+
+
+
+class T2ILoader():
+ def __init__(self, data_config):
+ super().__init__()
+
+ self.batch_size = data_config.dataloader.batch_size
+ self.num_workers = data_config.dataloader.num_workers
+
+ self.data_type = data_config.data_type
+
+ if self.data_type == 'image_ms':
+ self.train_dataset = T2IDatasetMS(**OmegaConf.to_container(data_config.dataset))
+ else:
+            raise NotImplementedError(f"unsupported data_type: {self.data_type}")
+ self.test_dataset = None
+ self.val_dataset = None
+
+ def train_len(self):
+ return len(self.train_dataset)
+
+ def train_dataloader(self, rank, world_size, global_batch_size, max_steps, resume_steps, seed):
+ batch_sampler = get_packed_batch_sampler(
+ self.train_dataset.packed_dataset, rank, world_size, max_steps, resume_steps, seed
+ )
+ return DataLoader(
+ self.train_dataset,
+ batch_sampler=batch_sampler,
+ collate_fn=bucket_collate_fn,
+ num_workers=self.num_workers,
+ pin_memory=True,
+ )
+
+ def test_dataloader(self):
+ return None
+
+ def val_dataloader(self):
+ return None
+
+
+
+
diff --git a/tim/models/c2i/tim_model.py b/tim/models/c2i/tim_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a2b4d71a9fd602741c2c55f7e2e4150b81d683b
--- /dev/null
+++ b/tim/models/c2i/tim_model.py
@@ -0,0 +1,406 @@
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# GLIDE: https://github.com/openai/glide-text2im
+# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
+# --------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+import math
+from timm.layers.mlp import SwiGLU, Mlp
+from timm.models.vision_transformer import PatchEmbed
+from tim.models.utils.funcs import build_mlp, modulate, get_parameter_dtype
+from tim.models.utils.rope import VisionRotaryEmbedding, rotate_half
+from flash_attn import flash_attn_func
+
+
+#################################################################################
+# Embedding Layers for Timesteps and Class Labels #
+#################################################################################
+class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+ """
+ def __init__(self, hidden_size, frequency_embedding_size=256):
+ super().__init__()
+ self.mlp = nn.Sequential(
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+ nn.SiLU(),
+ nn.Linear(hidden_size, hidden_size, bias=True),
+ )
+ self.frequency_embedding_size = frequency_embedding_size
+
+ @staticmethod
+ def positional_embedding(t, dim, max_period=10000):
+ """
+ Create sinusoidal timestep embeddings.
+ :param t: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings.
+ :return: an (N, D) Tensor of positional embeddings.
+ """
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+ half = dim // 2
+ freqs = torch.exp(
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+ ).to(device=t.device)
+ args = t[:, None].float() * freqs[None]
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+ return embedding
+
+ def forward(self, t):
+ self.timestep_embedding = self.positional_embedding
+ t_freq = self.timestep_embedding(t, dim=self.frequency_embedding_size).to(t.dtype)
+ t_emb = self.mlp(t_freq)
+ return t_emb
+
+
+class LabelEmbedder(nn.Module):
+ """
+ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
+ """
+ def __init__(self, num_classes, hidden_size, dropout_prob):
+ super().__init__()
+ use_cfg_embedding = dropout_prob > 0
+ self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
+ self.num_classes = num_classes
+ self.dropout_prob = dropout_prob
+
+
+ def forward(self, labels):
+ embeddings = self.embedding_table(labels)
+ return embeddings
+
+
+
+
+#################################################################################
+# Attention Block #
+#################################################################################
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = False,
+ qk_norm: bool = False,
+ attn_drop: float = 0.,
+ proj_drop: float = 0.,
+ norm_layer: nn.Module = nn.LayerNorm,
+ distance_aware: bool = False,
+ ) -> None:
+ super().__init__()
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.distance_aware = distance_aware
+ if distance_aware:
+ self.qkv_d = nn.Linear(dim, dim * 3, bias=False)
+ self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x: torch.Tensor, freqs_cos, freqs_sin, attn_type='fused_attn', delta_t=None) -> torch.Tensor:
+ B, N, C = x.shape
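+        # distance-aware variant adds a transition-gap (delta_t) dependent term to the fused QKV projection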
+ if self.distance_aware:
+ qkv = self.qkv(x) + self.qkv_d(delta_t)
+ else:
+ qkv = self.qkv(x)
+ if attn_type == 'flash_attn': # q, k, v: (B, N, n_head, d_head)
+ qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 1, 3, 4)
+ else: # q, k, v: (B, n_head, N, d_head)
+ qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+ ori_dtype = qkv.dtype
+ q, k, v = qkv.unbind(0)
+ q, k = self.q_norm(q), self.k_norm(k)
+
+ q = q * freqs_cos + rotate_half(q) * freqs_sin
+ k = k * freqs_cos + rotate_half(k) * freqs_sin
+ q, k = q.to(ori_dtype), k.to(ori_dtype)
+
+ if attn_type == 'flash_attn':
+ x = flash_attn_func(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.reshape(B, N, C)
+ elif attn_type == 'fused_attn':
+ x = F.scaled_dot_product_attention(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.transpose(1, 2).reshape(B, N, C)
+ else:
+ q = q * self.scale
+ attn = q @ k.transpose(-2, -1)
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+ x = attn @ v
+ x = x.transpose(1, 2).reshape(B, N, C)
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+
+
+
+
+#################################################################################
+# Core TiM Model #
+#################################################################################
+
+class TiMBlock(nn.Module):
+ """
+ A TiM block with adaptive layer norm zero (adaLN-Zero) conditioning.
+ """
+ def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
+ super().__init__()
+ self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ distance_aware = block_kwargs.get('distance_aware', False)
+ self.attn = Attention(
+ hidden_size, num_heads=num_heads, qkv_bias=True, qk_norm=block_kwargs["qk_norm"],
+ distance_aware=distance_aware
+ )
+ self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ mlp_hidden_dim = int(hidden_size * mlp_ratio)
+ self.mlp = SwiGLU(
+ in_features=hidden_size, hidden_features=(mlp_hidden_dim*2)//3, bias=True
+ )
+ if block_kwargs.get('lora_hidden_size', None) != None:
+ lora_hidden_size = block_kwargs['lora_hidden_size']
+ else:
+ lora_hidden_size = (hidden_size//4)*3
+ self.adaLN_modulation = SwiGLU(
+ in_features=hidden_size, hidden_features=lora_hidden_size, out_features=6*hidden_size, bias=True
+ )
+
+
+
+ def forward(self, x, c, freqs_cos, freqs_sin, attn_type, delta_t=None):
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
+ self.adaLN_modulation(c).chunk(6, dim=-1)
+ )
+ x = x + gate_msa * self.attn(modulate(self.norm1(x), shift_msa, scale_msa), freqs_cos, freqs_sin, attn_type, delta_t)
+ x = x + gate_mlp * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
+
+ return x
+
+
+class FinalLayer(nn.Module):
+ """
+ The final layer of TiM.
+ """
+ def __init__(self, hidden_size, patch_size, out_channels):
+ super().__init__()
+ self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
+ self.adaLN_modulation = SwiGLU(
+ in_features=hidden_size, hidden_features=hidden_size//2, out_features=2*hidden_size, bias=True
+ )
+
+
+ def forward(self, x, c):
+ shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
+ x = modulate(self.norm_final(x), shift, scale)
+ x = self.linear(x)
+
+ return x
+
+
+class TiM(nn.Module):
+ def __init__(
+ self,
+ input_size=32,
+ patch_size=2,
+ in_channels=4,
+ hidden_size=1152,
+ encoder_depth=8,
+ depth=28,
+ num_heads=16,
+ mlp_ratio=4.0,
+ class_dropout_prob=0.1,
+ num_classes=1000,
+ z_dim=768,
+ projector_dim=2048,
+ use_checkpoint: bool = False,
+ new_condition: str = 't-r',
+ use_new_embed: bool = False,
+ **block_kwargs # qk_norm
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = in_channels
+ self.patch_size = patch_size
+ self.num_heads = num_heads
+ self.num_classes = num_classes
+ self.encoder_depth = encoder_depth
+ self.use_checkpoint = use_checkpoint
+ self.new_condition = new_condition
+ self.use_new_embed = use_new_embed
+
+ self.x_embedder = PatchEmbed(
+ input_size, patch_size, in_channels, hidden_size, bias=True, strict_img_size=False
+ )
+ self.t_embedder = TimestepEmbedder(hidden_size) # timestep embedding type
+ if use_new_embed:
+ self.delta_embedder = TimestepEmbedder(hidden_size)
+ self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob)
+ # Will use fixed sin-cos embedding:
+ self.rope = VisionRotaryEmbedding(head_dim=hidden_size//num_heads)
+
+ self.blocks = nn.ModuleList([
+ TiMBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, **block_kwargs) for _ in range(depth)
+ ])
+ self.projector = build_mlp(hidden_size, projector_dim, z_dim)
+ self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
+ self.initialize_weights()
+
+ def initialize_weights(self):
+ # Initialize transformer layers:
+ def _basic_init(module):
+ if isinstance(module, nn.Linear):
+ torch.nn.init.xavier_uniform_(module.weight)
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+ self.apply(_basic_init)
+
+ # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+ w = self.x_embedder.proj.weight.data
+ nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+ nn.init.constant_(self.x_embedder.proj.bias, 0)
+
+ # Initialize label embedding table:
+ nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)
+
+ # Initialize timestep embedding MLP:
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+ # Zero-out adaLN modulation layers in TiM blocks:
+ for block in self.blocks:
+ nn.init.constant_(block.adaLN_modulation.fc2.weight, 0)
+ nn.init.constant_(block.adaLN_modulation.fc2.bias, 0)
+
+ # Zero-out output layers:
+ nn.init.constant_(self.final_layer.adaLN_modulation.fc2.weight, 0)
+ nn.init.constant_(self.final_layer.adaLN_modulation.fc2.bias, 0)
+
+ nn.init.constant_(self.final_layer.linear.weight, 0)
+ nn.init.constant_(self.final_layer.linear.bias, 0)
+
+ def unpatchify(self, x, H, W):
+ """
+ x: (N, T, patch_size**2 * C)
+        imgs: (N, C, H, W)
+ """
+ c = self.out_channels
+ p = self.patch_size
+ h, w = int(H/p), int(W/p)
+
+
+ x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
+ x = torch.einsum('nhwpqc->nchpwq', x)
+ imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
+ return imgs
+
+ def get_rope(self, h, w, attn_type):
+ grid_h = torch.arange(h)
+ grid_w = torch.arange(w)
+ grid = torch.meshgrid(grid_h, grid_w, indexing='xy')
+ grid = torch.stack(grid, dim=0).reshape(2, -1).unsqueeze(0)
+ freqs_cos, freqs_sin = self.rope.get_cached_2d_rope_from_grid(grid)
+ if attn_type == 'flash_attn': # (1, N, 1, d_head)
+ return freqs_cos.unsqueeze(2), freqs_sin.unsqueeze(2)
+ else: # (1, 1, N, d_head)
+ return freqs_cos.unsqueeze(1), freqs_sin.unsqueeze(1)
+
+
+ def forward(self, x, t, r, y, attn_type='flash_attn', return_zs=False, jvp=False):
+ """
+ Forward pass of TiM.
+ x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
+ t: (N,) tensor of diffusion timesteps
+        r: (N,) tensor of second timesteps paired with t to form the interval condition (see get_delta_embed)
+        y: (N,) tensor of class labels
+ """
+ B, C, H, W = x.shape
+ x = self.x_embedder(x) # (N, T, D), where T = H * W / patch_size ** 2
+
+ # timestep and class embedding
+ t_embed = self.t_embedder(t).unsqueeze(1) # (N, 1, D)
+ delta_embed = self.get_delta_embed(t, r).unsqueeze(1) # (N, 1, D)
+ y = self.y_embedder(y).unsqueeze(1) # (N, 1, D)
+ c = t_embed + delta_embed + y # (N, 1, D)
+ freqs_cos, freqs_sin = self.get_rope(
+ int(H/self.patch_size), int(W/self.patch_size), attn_type
+ )
+
+ for i, block in enumerate(self.blocks):
+ if (not self.use_checkpoint) or jvp:
+ x = block(x, c, freqs_cos, freqs_sin, attn_type, delta_embed) # (N, T, D)
+ else:
+ x = torch.utils.checkpoint.checkpoint(
+ self.ckpt_wrapper(block), x, c, freqs_cos, freqs_sin, attn_type, delta_embed
+ )
+ if (i + 1) == self.encoder_depth:
+ h_proj = self.projector(x)
+
+ x = self.final_layer(x, c) # (N, T, patch_size ** 2 * out_channels)
+ x = self.unpatchify(x, H, W) # (N, out_channels, H, W)
+
+ if return_zs:
+ return x, h_proj
+ else:
+ return x
+
+ def get_delta_embed(self, t, r):
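+        # Build the interval-conditioning embedding from the two timesteps. The string
+        # self.new_condition selects which combination of t, r, and t - r gets embedded.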
+ if self.use_new_embed:
+ delta_embedder = self.delta_embedder
+ else:
+ delta_embedder = self.t_embedder
+ if self.new_condition == 't-r':
+ delta_embed = delta_embedder(t-r)
+ elif self.new_condition == 'r':
+ delta_embed = delta_embedder(r)
+ elif self.new_condition == 't,r':
+ delta_embed = self.t_embedder(t) + delta_embedder(r)
+ elif self.new_condition == 't,t-r':
+ delta_embed = self.t_embedder(t) + delta_embedder(t-r)
+ elif self.new_condition == 'r,t-r':
+ delta_embed = self.t_embedder(r) + delta_embedder(t-r)
+ elif self.new_condition == 't,r,t-r':
+ delta_embed = self.t_embedder(t) + self.t_embedder(r) + delta_embedder(t-r)
+ else:
+ raise NotImplementedError
+ return delta_embed
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+ @property
+ def dtype(self) -> torch.dtype:
+ """
+ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
+ """
+ return get_parameter_dtype(self)
+
+
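+# Illustrative usage (a sketch; shapes follow the definitions above, and `qk_norm` must be
+# supplied because TiMBlock reads it from block_kwargs):
+#   model = TiM(input_size=32, patch_size=2, in_channels=4, qk_norm=True)
+#   v = model(x_t, t, r, y, attn_type='fused_attn')   # x_t: (N, 4, 32, 32); t, r, y: (N,)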
+
diff --git a/tim/models/nvidia_radio/hubconf.py b/tim/models/nvidia_radio/hubconf.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bcddd2e9b0354710a45e3d0fb22615a99575d2f
--- /dev/null
+++ b/tim/models/nvidia_radio/hubconf.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+dependencies = ["torch", "timm", "einops"]
+
+import os
+from typing import Dict, Any, Optional, Union, List
+import warnings
+
+import torch
+from torch.hub import load_state_dict_from_url
+
+from timm.models import clean_state_dict
+
+from .radio.adaptor_registry import adaptor_registry
+from .radio.common import DEFAULT_VERSION, RadioResource, RESOURCE_MAP
+from .radio.enable_damp import configure_damp_from_args
+from .radio.enable_spectral_reparam import disable_spectral_reparam, configure_spectral_reparam_from_args
+from .radio.feature_normalizer import FeatureNormalizer, IntermediateFeatureNormalizer
+from .radio.radio_model import RADIOModel, create_model_from_args
+from .radio.input_conditioner import get_default_conditioner
+from .radio.vitdet import apply_vitdet_arch, VitDetArgs
+
+
+def radio_model(
+ version: str = "",
+ progress: bool = True,
+ adaptor_names: Union[str, List[str]] = None,
+ vitdet_window_size: Optional[int] = None,
+ return_checkpoint: bool = False,
+ support_packing: bool=False,
+ **kwargs,
+) -> RADIOModel:
+ if not version:
+ version = DEFAULT_VERSION
+
+ if os.path.isfile(version):
+ chk = torch.load(version, map_location="cpu", weights_only=False)
+ resource = RadioResource(version, patch_size=None, max_resolution=None, preferred_resolution=None)
+ else:
+ resource = RESOURCE_MAP[version]
+ chk = load_state_dict_from_url(
+ resource.url, progress=progress, map_location="cpu", weights_only=False,
+ )
+
+ if "state_dict_ema" in chk:
+ state_dict = chk["state_dict_ema"]
+ chk['args'].spectral_reparam = False
+ else:
+ state_dict = chk["state_dict"]
+
+ args = chk["args"]
+ args.support_packing = support_packing
+ mod = create_model_from_args(args)
+
+ mod_state_dict = get_prefix_state_dict(state_dict, "base_model.")
+
+ if args.spectral_reparam:
+ configure_spectral_reparam_from_args(mod, args, state_dict_guidance=mod_state_dict)
+
+ if getattr(args, 'damp', None):
+ configure_damp_from_args(mod, args)
+
+ state_dict = clean_state_dict(state_dict)
+
+ key_warn = mod.load_state_dict(mod_state_dict, strict=False)
+ if key_warn.missing_keys:
+ warnings.warn(f'Missing keys in state dict: {key_warn.missing_keys}')
+ if key_warn.unexpected_keys:
+ warnings.warn(f'Unexpected keys in state dict: {key_warn.unexpected_keys}')
+
+ if chk['args'].spectral_reparam:
+ # Spectral reparametrization uses PyTorch's "parametrizations" API. The idea behind
+ # the method is that instead of there being a `weight` tensor for certain Linear layers
+ # in the model, we make it a dynamically computed function. During training, this
+ # helps stabilize the model. However, for downstream use cases, it shouldn't be necessary.
+        # Disabling it in this context means that instead of recomputing `w' = f(w)` on every forward
+        # pass, we compute `w' = f(w)` once, during this function call, and replace the parametrization
+        # with the realized weights.
+ # This makes the model run faster, and also use less memory.
+ disable_spectral_reparam(mod)
+ chk['args'].spectral_reparam = False
+
+ conditioner = get_default_conditioner()
+ conditioner.load_state_dict(get_prefix_state_dict(state_dict, "input_conditioner."))
+
+ dtype = getattr(chk['args'], 'dtype', torch.float32)
+ mod.to(dtype=dtype)
+ conditioner.dtype = dtype
+
+ cls_token_per_teacher = getattr(chk['args'], 'cls_token_per_teacher', True)
+ if cls_token_per_teacher:
+ name_to_idx_map = dict()
+ for i, t in enumerate(chk['args'].teachers):
+ if t.get('use_summary', True):
+ name = t['name']
+ if name not in name_to_idx_map:
+ name_to_idx_map[name] = i
+ summary_idxs = torch.tensor(sorted(name_to_idx_map.values()), dtype=torch.int64)
+ else:
+ summary_idxs = torch.tensor([0], dtype=torch.int64)
+
+ if adaptor_names is None:
+ adaptor_names = []
+ elif isinstance(adaptor_names, str):
+ adaptor_names = [adaptor_names]
+
+ teachers = chk["args"].teachers
+ adaptors = dict()
+ for adaptor_name in adaptor_names:
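+        # for/else: the else clause below only runs if the inner loop never breaks,
+        # i.e. no teacher config matched the requested adaptor name.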
+ for tidx, tconf in enumerate(teachers):
+ if tconf["name"] == adaptor_name:
+ break
+ else:
+ raise ValueError(f'Unable to find the specified adaptor name. Known names: {list(t["name"] for t in teachers)}')
+
+ ttype = tconf["type"]
+
+ pf_idx_head = f'_heads.{tidx}'
+ pf_name_head = f'_heads.{adaptor_name}'
+ pf_idx_feat = f'_feature_projections.{tidx}'
+ pf_name_feat = f'_feature_projections.{adaptor_name}'
+
+ adaptor_state = dict()
+ for k, v in state_dict.items():
+ if k.startswith(pf_idx_head):
+ adaptor_state['summary' + k[len(pf_idx_head):]] = v
+ elif k.startswith(pf_name_head):
+ adaptor_state['summary' + k[len(pf_name_head):]] = v
+ elif k.startswith(pf_idx_feat):
+ adaptor_state['feature' + k[len(pf_idx_feat):]] = v
+ elif k.startswith(pf_name_feat):
+ adaptor_state['feature' + k[len(pf_name_feat):]] = v
+
+ adaptor = adaptor_registry.create_adaptor(ttype, chk["args"], tconf, adaptor_state)
+ adaptor.head_idx = tidx if cls_token_per_teacher else 0
+ adaptors[adaptor_name] = adaptor
+
+ feat_norm_sd = get_prefix_state_dict(state_dict, '_feature_normalizer.')
+ feature_normalizer = None
+ if feat_norm_sd:
+ feature_normalizer = FeatureNormalizer(feat_norm_sd['mean'].shape[0], dtype=dtype)
+ feature_normalizer.load_state_dict(feat_norm_sd)
+
+ inter_feat_norm_sd = get_prefix_state_dict(state_dict, '_intermediate_feature_normalizer.')
+ inter_feature_normalizer = None
+ if inter_feat_norm_sd:
+ inter_feature_normalizer = IntermediateFeatureNormalizer(
+ *inter_feat_norm_sd['means'].shape[:2],
+ rot_per_layer=inter_feat_norm_sd['rotation'].ndim == 3,
+ dtype=dtype
+ )
+ inter_feature_normalizer.load_state_dict(inter_feat_norm_sd)
+
+ radio = RADIOModel(
+ mod,
+ conditioner,
+ summary_idxs=summary_idxs,
+ patch_size=resource.patch_size,
+ max_resolution=resource.max_resolution,
+ window_size=vitdet_window_size,
+ preferred_resolution=resource.preferred_resolution,
+ adaptors=adaptors,
+ feature_normalizer=feature_normalizer,
+ inter_feature_normalizer=inter_feature_normalizer,
+ )
+
+ if vitdet_window_size is not None:
+ apply_vitdet_arch(
+ mod,
+ VitDetArgs(
+ vitdet_window_size,
+ radio.num_summary_tokens,
+ num_windowed=resource.vitdet_num_windowed,
+ num_global=resource.vitdet_num_global,
+ ),
+ )
+
+ if return_checkpoint:
+ return radio, chk
+ return radio
+
+
+def get_prefix_state_dict(state_dict: Dict[str, Any], prefix: str):
+ mod_state_dict = {
+ k[len(prefix) :]: v for k, v in state_dict.items() if k.startswith(prefix)
+ }
+ return mod_state_dict
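+
+
+# Illustrative usage (a sketch; assumes the checkpoint URLs in RESOURCE_MAP are reachable):
+#   model = radio_model(version="radio_v2.5-b")
+#   summary, spatial_features = model(images)   # images: (B, 3, H, W)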
diff --git a/tim/models/nvidia_radio/radio/__init__.py b/tim/models/nvidia_radio/radio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be876fb58786fa23bf012512bdd72388b2c62051
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+# Register the adaptors
+from .adaptor_registry import adaptor_registry
+from . import open_clip_adaptor
+from .adaptor_base import AdaptorInput, RadioOutput, AdaptorBase
+
+# Enable support for other model types via the timm register_model mechanism
+from . import extra_timm_models
+from . import extra_models
+from . import vision_transformer_xpos
diff --git a/tim/models/nvidia_radio/radio/adaptor_base.py b/tim/models/nvidia_radio/radio/adaptor_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c12ea2e5eea00f5ce9fdde39c0c2b3acd2e1fbe
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/adaptor_base.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from argparse import Namespace
+from typing import NamedTuple, Optional
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+
+class AdaptorInput(NamedTuple):
+ images: torch.Tensor
+ summary: torch.Tensor
+ features: torch.Tensor
+ feature_fmt: str
+ patch_size: int
+
+
+class RadioOutput(NamedTuple):
+ summary: torch.Tensor
+ features: torch.Tensor
+
+ def to(self, *args, **kwargs):
+ return RadioOutput(
+ self.summary.to(*args, **kwargs) if self.summary is not None else None,
+ self.features.to(*args, **kwargs) if self.features is not None else None,
+ )
+
+
+class AdaptorBase(nn.Module):
+ def forward(self, input: AdaptorInput) -> RadioOutput:
+ raise NotImplementedError("Subclasses must implement this!")
diff --git a/tim/models/nvidia_radio/radio/adaptor_generic.py b/tim/models/nvidia_radio/radio/adaptor_generic.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ff46256badf79bdd7a7a74cf6a17338360ed7b8
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/adaptor_generic.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from argparse import Namespace
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from .adaptor_base import AdaptorBase, AdaptorInput, RadioOutput
+from .adaptor_mlp import create_mlp_from_state, create_mlp_from_config
+
+
+class GenericAdaptor(AdaptorBase):
+ def __init__(self, main_config: Namespace, adaptor_config, state, mlp_config=None):
+ super().__init__()
+
+ extra_args = dict()
+ ups = None
+ ups_rank = None
+ if adaptor_config is not None:
+ ups = adaptor_config.get('fd_upsample_factor', None)
+ ups_rank = adaptor_config.get('fd_upsample_rank', None)
+ elif mlp_config is not None:
+ ups = mlp_config["feature"].get('upsample_factor', None)
+ ups_rank = mlp_config["feature"].get('upsample_rank', None)
+ if ups is not None:
+ extra_args['upsample_factor'] = ups
+ extra_args['upsample_rank'] = ups_rank
+
+ if state is not None:
+ spectral_heads = getattr(main_config, 'spectral_heads', False)
+ self.head_mlp = create_mlp_from_state(main_config.mlp_version, state, 'summary.', spectral_weights=spectral_heads)
+ self.feat_mlp = create_mlp_from_state(main_config.mlp_version, state, 'feature.', spectral_weights=spectral_heads, **extra_args)
+ else:
+ assert mlp_config is not None, "Config must not be None if state is None"
+
+ self.head_mlp = create_mlp_from_config(
+ main_config.mlp_version,
+ mlp_config["summary"]["input_dim"],
+ mlp_config["summary"]["hidden_dim"],
+ mlp_config["summary"]["output_dim"],
+ mlp_config["summary"]["num_inner"],
+ )
+ self.feat_mlp = create_mlp_from_config(
+ main_config.mlp_version,
+ mlp_config["feature"]["input_dim"],
+ mlp_config["feature"]["hidden_dim"],
+ mlp_config["feature"]["output_dim"],
+ mlp_config["feature"]["num_inner"],
+ **extra_args
+ )
+
+ def forward(self, input: AdaptorInput) -> RadioOutput:
+        # Convert the input's dtype to the dtype of the adaptor's first parameter.
+ first_param = next(self.parameters())
+ summary = self.head_mlp(input.summary.to(dtype=first_param.dtype)).to(dtype=input.summary.dtype)
+ feat = self.feat_mlp(input.features.to(dtype=first_param.dtype), images=input.images, patch_size=input.patch_size).to(dtype=input.features.dtype)
+
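+        # Optionally reshape the (B, N, C) token features into an NCHW feature map, using the image
+        # size, patch size, and the feature MLP's upsample factor to recover the spatial grid.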
+ if input.feature_fmt == 'NCHW':
+ feat = (feat.reshape(feat.shape[0], input.images.shape[-2] // input.patch_size * self.feat_mlp.upsample_factor, input.images.shape[-1] // input.patch_size * self.feat_mlp.upsample_factor, feat.shape[2])
+ .permute(0, 3, 1, 2)
+ )
+
+ return RadioOutput(summary, feat)
diff --git a/tim/models/nvidia_radio/radio/adaptor_mlp.py b/tim/models/nvidia_radio/radio/adaptor_mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac93dbb1a101f1986ef00e2490899dcd47ba2343
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/adaptor_mlp.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+import math
+from typing import Dict, Optional
+
+import torch
+from torch import nn
+
+from einops import rearrange
+from timm.models.vision_transformer import Block
+
+from .enable_spectral_reparam import disable_spectral_reparam, enable_spectral_reparam
+
+
+class MLP(nn.Module):
+ def __init__(self, input_size: int, hidden_size: int, output_size: int,
+ num_inner: int = 0, device: torch.device = None, **kwargs):
+ super(MLP, self).__init__()
+ self.fc1 = nn.Linear(input_size, hidden_size, device=device)
+ self.norm = nn.LayerNorm(hidden_size, device=device)
+ self.relu = nn.ReLU()
+
+ inner = []
+ for _ in range(num_inner):
+ inner.extend([
+ nn.Linear(hidden_size, hidden_size, device=device),
+ nn.LayerNorm(hidden_size, device=device),
+ nn.ReLU(),
+ ])
+ if inner:
+ self.inner = nn.Sequential(*inner)
+ else:
+ self.inner = nn.Identity()
+
+ self.fc2 = nn.Linear(hidden_size, output_size, device=device)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.fc1(x)
+ x = self.norm(x)
+ x = self.relu(x)
+ x = self.inner(x)
+ x = self.fc2(x)
+ return x
+
+
+class MLP2(nn.Module):
+ def __init__(self, input_size: int, hidden_size: int, output_size: int,
+ num_inner: int = 0,
+ pre_norm: bool = False, device: torch.device = None,
+ upsample_factor: int = 1,
+ upsample_rank: int = None,
+ from_config: bool = False,
+ **kwargs):
+ super().__init__()
+
+ self.pre_norm = nn.Sequential(
+ nn.LayerNorm(input_size),
+ nn.GELU(),
+ ) if pre_norm else nn.Identity()
+
+ self.upsample_factor = upsample_factor
+ sq_ups = upsample_factor ** 2
+
+ self._real_output_dim = output_size // sq_ups
+
+ # hidden_size *= upsample_factor
+ # output_size *= (upsample_factor ** 2)
+
+ self.fc1 = nn.Linear(input_size, hidden_size, device=device)
+
+ blocks = []
+ for _ in range(num_inner):
+ blocks.append(nn.Sequential(
+ nn.LayerNorm(hidden_size, device=device),
+ nn.GELU(),
+ nn.Linear(hidden_size, hidden_size, device=device),
+ ))
+ self.blocks = nn.ModuleList(blocks)
+
+ self.final = nn.Sequential(
+ nn.LayerNorm(hidden_size, device=device),
+ nn.GELU(),
+ nn.Linear(hidden_size, output_size, device=device),
+ )
+
+ def forward(self, x: torch.Tensor, images: Optional[torch.Tensor] = None, patch_size: Optional[int] = None) -> torch.Tensor:
+ x = self.pre_norm(x)
+ x = self.fc1(x)
+ for block in self.blocks:
+ x = x + block(x)
+ x = self.final(x)
+
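+        # Optional learned upsampling: each token's output vector is treated as a (u1 x u2) grid of
+        # sub-tokens and scattered into a finer (h*u1, w*u2) token grid via the rearrange below.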
+ if self.upsample_factor > 1:
+ if images is None:
+ raise ValueError(f'`images` cannot be `None` when the head\'s `upsample_factor > 1`!')
+ if patch_size is None:
+ raise ValueError(f'`patch_size` cannot be `None` when the head\'s `upsample_factor > 1`!')
+ h, w = tuple(d // patch_size for d in images.shape[-2:])
+ x = rearrange(x, 'b (h w) (u1 u2 c) -> b (h u1 w u2) c',
+ h=h, w=w, u1=self.upsample_factor, u2=self.upsample_factor,
+ c=self._real_output_dim)
+
+ return x
+
+
+MLP_FACTORY = {
+ 'v1': MLP,
+ 'v2': MLP2,
+}
+
+
+def strip_prefix(state: Dict[str, torch.Tensor], prefix: str):
+ state = {
+ k[len(prefix):]: v
+ for k, v in state.items()
+ if k.startswith(prefix)
+ }
+ return state
+
+
+def get_mlp_info_from_state(version: str, state: Dict[str, torch.Tensor], prefix: str = '', spectral_weights: bool = False):
+ state = strip_prefix(state, prefix)
+
+ weight_suffix = 'weight' if not spectral_weights else 'parametrizations.weight.original'
+
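+    # Layer sizes are read off the (possibly spectral-reparametrized) weight shapes; the number
+    # of inner blocks is found by probing consecutively numbered keys until one is missing.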
+ if version == 'v1':
+ hidden_dim, input_dim = state[f'fc1.{weight_suffix}'].shape
+ output_dim = state[f'fc2.{weight_suffix}'].shape[0]
+
+ for num_inner in range(1000):
+ k = f'inner.{num_inner}.0.weight'
+ if k not in state:
+ break
+ elif version == 'v2':
+ hidden_dim, input_dim = state[f'fc1.{weight_suffix}'].shape
+ output_dim = state[f'final.2.{weight_suffix}'].shape[0]
+
+ for num_inner in range(1000):
+ k = f'blocks.{num_inner}.0.weight'
+ if k not in state:
+ break
+ else:
+ raise ValueError(f'Unsupported MLP version: {version}')
+
+ return input_dim, hidden_dim, output_dim, num_inner
+
+
+def create_mlp_from_config(version: str, input_dim: int, hidden_dim: int, output_dim: int, num_inner: int, **kwargs):
+ ret: nn.Module = MLP_FACTORY[version](input_dim, hidden_dim, output_dim, num_inner, from_config=True, **kwargs)
+
+ return ret
+
+
+def create_mlp_from_state(version: str, state: Dict[str, torch.Tensor], prefix: str = '', spectral_weights: bool = False, **kwargs):
+ state = strip_prefix(state, prefix)
+
+ input_dim, hidden_dim, output_dim, num_inner = get_mlp_info_from_state(version, state, spectral_weights=spectral_weights)
+
+ ret: nn.Module = create_mlp_from_config(version, input_dim, hidden_dim, output_dim, num_inner, **kwargs)
+
+ if spectral_weights:
+ enable_spectral_reparam(ret, init_norm_to_current=False, state_dict_guidance=state)
+
+ ret.load_state_dict(state)
+
+ if spectral_weights:
+ disable_spectral_reparam(ret)
+
+ return ret
diff --git a/tim/models/nvidia_radio/radio/adaptor_registry.py b/tim/models/nvidia_radio/radio/adaptor_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1dea8e898fe3b49fac1b647e20b6d818cb82bd1
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/adaptor_registry.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from argparse import Namespace
+from typing import Dict, Any
+
+import torch
+
+from .adaptor_generic import GenericAdaptor, AdaptorBase
+
+dict_t = Dict[str, Any]
+state_t = Dict[str, torch.Tensor]
+
+
+class AdaptorRegistry:
+ def __init__(self):
+ self._registry = {}
+
+ def register_adaptor(self, name):
+ def decorator(factory_function):
+ if name in self._registry:
+ raise ValueError(f"Model '{name}' already registered")
+ self._registry[name] = factory_function
+ return factory_function
+ return decorator
+
+ def create_adaptor(self, name, main_config: Namespace, adaptor_config: dict_t, state: state_t) -> AdaptorBase:
+ if name not in self._registry:
+ return GenericAdaptor(main_config, adaptor_config, state)
+ return self._registry[name](main_config, adaptor_config, state)
+
+# Creating an instance of the registry
+adaptor_registry = AdaptorRegistry()
diff --git a/tim/models/nvidia_radio/radio/block.py b/tim/models/nvidia_radio/radio/block.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf8c0ba708cf8befc38c77c5e559de85603ca1f1
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/block.py
@@ -0,0 +1,54 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+"""
+Block modules
+"""
+
+import torch
+import torch.nn as nn
+from timm.models.layers import DropPath
+
+from .conv import Conv
+# from .transformer import TransformerBlock
+
+__all__ = ('C2f', 'Bottleneck',)
+
+class C2f(nn.Module):
+ """Faster Implementation of CSP Bottleneck with 2 convolutions."""
+
+ def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, drop_path=None): # ch_in, ch_out, number, shortcut, groups, expansion
+ super().__init__()
+ if drop_path is None:
+ drop_path = [0.0] * n
+
+ self.c = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, 2 * self.c, 1, 1)
+ self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
+ self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0, drop_path=drop_path[i]) for i in range(n))
+
+ def forward(self, x):
+ """Forward pass through C2f layer."""
+ y = list(self.cv1(x).chunk(2, 1))
+ y.extend(m(y[-1]) for m in self.m)
+ return self.cv2(torch.cat(y, 1))
+
+ def forward_split(self, x):
+ """Forward pass using split() instead of chunk()."""
+ y = list(self.cv1(x).split((self.c, self.c), 1))
+ y.extend(m(y[-1]) for m in self.m)
+ return self.cv2(torch.cat(y, 1))
+
+
+class Bottleneck(nn.Module):
+ """Standard bottleneck."""
+
+ def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5, drop_path=0.0): # ch_in, ch_out, shortcut, groups, kernels, expand
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, k[0], 1)
+ self.cv2 = Conv(c_, c2, k[1], 1, g=g)
+ self.add = shortcut and c1 == c2
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+ def forward(self, x):
+        """Apply the bottleneck convolutions, adding the input as a residual shortcut when `self.add` is True."""
+ return x + self.drop_path1(self.cv2(self.cv1(x))) if self.add else self.cv2(self.cv1(x))
diff --git a/tim/models/nvidia_radio/radio/cls_token.py b/tim/models/nvidia_radio/radio/cls_token.py
new file mode 100644
index 0000000000000000000000000000000000000000..b41351238f557303456575916d7d520cc789b487
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/cls_token.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from typing import Optional
+
+import torch
+from torch import nn
+
+
+class ClsToken(nn.Module):
+ def __init__(self, ndim: int,
+ num_tokens: int = 1,
+ enabled: bool = True,
+ register_multiple: Optional[int] = None,
+ num_registers: Optional[int] = None,
+ ):
+ super().__init__()
+
+ self.ndim = ndim
+ self.enabled = enabled
+ self.num_registers = 0
+ self.num_tokens = num_tokens
+ if enabled:
+ if num_registers:
+ self.num_registers = num_registers
+ elif register_multiple:
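+                # Add enough registers that the total token count lands on a multiple of register_multiple.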
+ self.num_registers = register_multiple - (num_tokens % register_multiple)
+
+ scale = ndim ** -0.5
+ self.token = nn.Parameter(torch.randn(num_tokens + self.num_registers, ndim) * scale)
+ else:
+ self.token = None
+
+ self.num_patches = self.num_tokens + self.num_registers
+
+ def disable(self):
+ self.token = None
+ self.enabled = False
+
+ def forward(self, x: torch.Tensor):
+ if self.token is None:
+ return x
+
+ token = self.token.unsqueeze(0).expand(x.shape[0], -1, -1)
+ x = torch.cat([
+ token,
+ x,
+ ], dim=1)
+
+ return x
+
+ def no_weight_decay(self):
+ return [
+ 'token',
+ ]
diff --git a/tim/models/nvidia_radio/radio/common.py b/tim/models/nvidia_radio/radio/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d93ea42ccd1a7f76f7e30e512c91b26366f1492
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/common.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from dataclasses import dataclass
+from typing import Optional
+
+from .radio_model import Resolution
+
+
+@dataclass
+class RadioResource:
+ url: str
+ patch_size: int
+ max_resolution: int
+ preferred_resolution: Resolution
+ vitdet_num_windowed: Optional[int] = None
+ vitdet_num_global: Optional[int] = None
+
+
+RESOURCE_MAP = {
+ # RADIOv2.5
+ "radio_v2.5-b": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio-v2.5-b_half.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=(768, 768),
+ vitdet_num_global=4,
+ ),
+ "radio_v2.5-l": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio-v2.5-l_half.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=(768, 768),
+ vitdet_num_global=4,
+ ),
+ "radio_v2.5-h": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-h.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=(768, 768),
+ vitdet_num_global=4,
+ ),
+ "radio_v2.5-h-norm": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-h-norm.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=(768, 768),
+ vitdet_num_global=4,
+ ),
+ "radio_v2.5-g": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-g.pth.tar?download=true",
+ patch_size=14,
+ max_resolution=1792,
+ preferred_resolution=(896, 896),
+ vitdet_num_global=8,
+ ),
+ # RADIO
+ "radio_v2.1": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.1_bf16.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=Resolution(432, 432),
+ vitdet_num_windowed=5,
+ ),
+ "radio_v2": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=Resolution(432, 432),
+ vitdet_num_windowed=5,
+ ),
+ "radio_v1": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v1.pth.tar?download=true",
+ patch_size=14,
+ max_resolution=1050,
+ preferred_resolution=Resolution(378, 378),
+ ),
+ # E-RADIO
+ "e-radio_v2": RadioResource(
+ "https://huggingface.co/nvidia/RADIO/resolve/main/eradio_v2.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=Resolution(512, 512),
+ ),
+ # C-RADIO
+ "c-radio_v2.5-g": RadioResource(
+ "https://huggingface.co/nvidia/C-RADIOv2-g/resolve/main/c-radio_v2-g_half.pth.tar",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=(768, 768),
+ vitdet_num_global=8,
+ ),
+ "c-radio_v3-l": RadioResource(
+ # NOTE: Currently, this model cannot be loaded via TorchHub. Instead, use the transformers API at https://huggingface.co/nvidia/C-RADIOv3-L
+ # and accept the license terms.
+ "https://huggingface.co/nvidia/C-RADIOv3-L/resolve/main/c-radio-v3_l_half.pth.tar?download=true",
+ patch_size=16,
+ max_resolution=2048,
+ preferred_resolution=Resolution(512, 512),
+ ),
+}
+
+DEFAULT_VERSION = "radio_v2.5-h"
diff --git a/tim/models/nvidia_radio/radio/conv.py b/tim/models/nvidia_radio/radio/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7295a9f07ad57b3bd9cd7d5108e54657cc9d6d
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/conv.py
@@ -0,0 +1,65 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+"""
+Convolution modules
+"""
+
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+__all__ = ('Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
+ 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'RepConv')
+
+
+def autopad(k, p=None, d=1): # kernel, padding, dilation
+ """Pad to 'same' shape outputs."""
+ if d > 1:
+ k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
+ if p is None:
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
+ return p
+
+# Pavlo's implementation with switch to deploy
+class Conv(nn.Module):
+ default_act = nn.SiLU() # default activation
+
+ def __init__(self, a, b, kernel_size=1, stride=1, padding=None, g=1, dilation=1, bn_weight_init=1, bias=False, act=True):
+ super().__init__()
+
+ self.conv = torch.nn.Conv2d(a, b, kernel_size, stride, autopad(kernel_size, padding, dilation), dilation, g, bias=False)
+        self.bn = torch.nn.BatchNorm2d(b)
+        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
+        torch.nn.init.constant_(self.bn.bias, 0)
+ self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
+
+
+ def forward(self,x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.act(x)
+ return x
+
+ @torch.no_grad()
+ def switch_to_deploy(self):
+ if not isinstance(self.bn, nn.Identity):
+ # return 1
+ c, bn = self.conv, self.bn
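+            # Fold the BatchNorm into the conv weights: w' = w * gamma / sqrt(var + eps),
+            # b' = beta - mean * gamma / sqrt(var + eps).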
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
+ w = c.weight * w[:, None, None, None]
+ b = bn.bias - bn.running_mean * bn.weight / \
+ (bn.running_var + bn.eps)**0.5
+ # m = torch.nn.Conv2d(w.size(1) * c.groups,
+ # w.size(0),
+ # w.shape[2:],
+ # stride=c.stride,
+ # padding=c.padding,
+ # dilation=c.dilation,
+ # groups=c.groups)
+ self.conv.weight.data.copy_(w)
+ self.conv.bias = nn.Parameter(b)
+ # self.conv.bias.data.copy_(b)
+ # self.conv = m.to(c.weight.device)
+ self.bn = nn.Identity()
diff --git a/tim/models/nvidia_radio/radio/dinov2_arch.py b/tim/models/nvidia_radio/radio/dinov2_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ba9e8ba645194b4b9c742b2e80e712e3af8d67a
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/dinov2_arch.py
@@ -0,0 +1,1016 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
+
+# Nvidia
+# NOTE: We re-define this model architecture primarily so that we don't have to worry about version compatibility breaking,
+# but also because Huggingface does a string replace of `gamma` to something else when loading the model state,
+# and this breaks loading of this model.
+
+from enum import Enum
+from functools import partial
+import logging
+import math
+import os
+import sys
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+import warnings
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.init import trunc_normal_
+
+_torch_has_sdpa = hasattr(F, 'scaled_dot_product_attention')
+
+
+XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None
+try:
+ if XFORMERS_ENABLED:
+ from xformers.ops import fmha, scaled_index_add, index_select_cat, SwiGLU, memory_efficient_attention, unbind
+
+ XFORMERS_AVAILABLE = True
+ else:
+ raise ImportError
+except ImportError:
+ XFORMERS_AVAILABLE = False
+
+
+def make_2tuple(x):
+ if isinstance(x, tuple):
+ assert len(x) == 2
+ return x
+
+ assert isinstance(x, int)
+ return (x, x)
+
+
+class PatchEmbed(nn.Module):
+ """
+ 2D image to patch embedding: (B,C,H,W) -> (B,N,D)
+
+ Args:
+ img_size: Image size.
+ patch_size: Patch token size.
+ in_chans: Number of input image channels.
+ embed_dim: Number of linear projection output channels.
+ norm_layer: Normalization layer.
+ """
+
+ def __init__(
+ self,
+ img_size: Union[int, Tuple[int, int]] = 224,
+ patch_size: Union[int, Tuple[int, int]] = 16,
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ norm_layer: Optional[Callable] = None,
+ flatten_embedding: bool = True,
+ ) -> None:
+ super().__init__()
+
+ image_HW = make_2tuple(img_size)
+ patch_HW = make_2tuple(patch_size)
+ patch_grid_size = (
+ image_HW[0] // patch_HW[0],
+ image_HW[1] // patch_HW[1],
+ )
+
+ self.img_size = image_HW
+ self.patch_size = patch_HW
+ self.patches_resolution = patch_grid_size
+ self.num_patches = patch_grid_size[0] * patch_grid_size[1]
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.flatten_embedding = flatten_embedding
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ _, _, H, W = x.shape
+ patch_H, patch_W = self.patch_size
+
+ assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
+ assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"
+
+ x = self.proj(x) # B C H W
+ H, W = x.size(2), x.size(3)
+ x = x.flatten(2).transpose(1, 2) # B HW C
+ x = self.norm(x)
+ if not self.flatten_embedding:
+ x = x.reshape(-1, H, W, self.embed_dim) # B H W C
+ return x
+
+ def flops(self) -> float:
+ Ho, Wo = self.patches_resolution
+ flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
+ if self.norm is not None:
+ flops += Ho * Wo * self.embed_dim
+ return flops
+
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = False,
+ proj_bias: bool = True,
+ attn_drop: float = 0.0,
+ proj_drop: float = 0.0,
+ ) -> None:
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = head_dim**-0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim, bias=proj_bias)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+
+ q, k, v = qkv[0], qkv[1], qkv[2]
+ if _torch_has_sdpa:
+ x = F.scaled_dot_product_attention(
+ q, k, v,
+ is_causal=False,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ scale=self.scale,
+ )
+ else:
+ q = q * self.scale
+ attn = q @ k.transpose(-2, -1)
+
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+ x = attn @ v
+
+ x = x.transpose(1, 2).reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class MemEffAttention(Attention):
+ def forward(self, x: torch.Tensor, attn_bias=None) -> torch.Tensor:
+ if not XFORMERS_AVAILABLE:
+ if attn_bias is not None:
+ raise AssertionError("xFormers is required for using nested tensors")
+ return super().forward(x)
+
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
+
+ q, k, v = unbind(qkv, 2)
+
+ x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
+ x = x.reshape([B, N, C])
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class Mlp(nn.Module):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = nn.GELU,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+class SwiGLUFFN(nn.Module):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = None,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
+ self.w3 = nn.Linear(hidden_features, out_features, bias=bias)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x12 = self.w12(x)
+ x1, x2 = x12.chunk(2, dim=-1)
+ hidden = F.silu(x1) * x2
+ return self.w3(hidden)
+
+
+if not XFORMERS_AVAILABLE:
+ SwiGLU = SwiGLUFFN
+
+
+class SwiGLUFFNFused(SwiGLU):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = None,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+ super().__init__(
+ in_features=in_features,
+ hidden_features=hidden_features,
+ out_features=out_features,
+ bias=bias,
+ )
+
+
+def drop_path(x, drop_prob: float = 0.0, training: bool = False):
+ if drop_prob == 0.0 or not training:
+ return x
+ keep_prob = 1 - drop_prob
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+ if keep_prob > 0.0:
+ random_tensor.div_(keep_prob)
+ output = x * random_tensor
+ return output
+
+
+class DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob=None):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, x):
+ return drop_path(x, self.drop_prob, self.training)
+
+
+class LayerScale(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ init_values: Union[float, torch.Tensor] = 1e-5,
+ inplace: bool = False,
+ ) -> None:
+ super().__init__()
+ self.inplace = inplace
+ self.grandma = nn.Parameter(init_values * torch.ones(dim))
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return x.mul_(self.grandma) if self.inplace else x * self.grandma
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ # Huggingface is absurd and it will rename strings that contain `gamma`, which means that the normal DINO implementation
+ # of LayerScale won't work with HFHub. So we rename the variable to 'grandma', and support loading checkpoints in either
+ # format
+ key_a = f'{prefix}gamma'
+ key_b = f'{prefix}grandma'
+ if key_a in state_dict:
+ gamma = state_dict[key_a]
+ elif key_b in state_dict:
+ gamma = state_dict[key_b]
+ else:
+ if strict:
+ raise KeyError(f"Couldn't find the key {key_a} nor {key_b} in the state dict!")
+ else:
+ missing_keys.append(key_a)
+ missing_keys.append(key_b)
+ unexpected_keys.extend(state_dict.keys())
+ gamma = None
+
+ if gamma is not None:
+ self.grandma.data.copy_(gamma)
+
+ # return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
+
+
+class Block(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ qkv_bias: bool = False,
+ proj_bias: bool = True,
+ ffn_bias: bool = True,
+ drop: float = 0.0,
+ attn_drop: float = 0.0,
+ init_values=None,
+ drop_path: float = 0.0,
+ act_layer: Callable[..., nn.Module] = nn.GELU,
+ norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
+ attn_class: Callable[..., nn.Module] = Attention,
+ ffn_layer: Callable[..., nn.Module] = Mlp,
+ ) -> None:
+ super().__init__()
+ # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
+ self.norm1 = norm_layer(dim)
+ self.attn = attn_class(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ proj_bias=proj_bias,
+ attn_drop=attn_drop,
+ proj_drop=drop,
+ )
+ self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = ffn_layer(
+ in_features=dim,
+ hidden_features=mlp_hidden_dim,
+ act_layer=act_layer,
+ drop=drop,
+ bias=ffn_bias,
+ )
+ self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.sample_drop_ratio = drop_path
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ def attn_residual_func(x: torch.Tensor) -> torch.Tensor:
+ return self.ls1(self.attn(self.norm1(x)))
+
+ def ffn_residual_func(x: torch.Tensor) -> torch.Tensor:
+ return self.ls2(self.mlp(self.norm2(x)))
+
+ if self.training and self.sample_drop_ratio > 0.1:
+ # the overhead is compensated only for a drop path rate larger than 0.1
+ x = drop_add_residual_stochastic_depth(
+ x,
+ residual_func=attn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ )
+ x = drop_add_residual_stochastic_depth(
+ x,
+ residual_func=ffn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ )
+ elif self.training and self.sample_drop_ratio > 0.0:
+ x = x + self.drop_path1(attn_residual_func(x))
+ x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
+ else:
+ x = x + attn_residual_func(x)
+ x = x + ffn_residual_func(x)
+ return x
+
+
+class NestedTensorBlock(Block):
+ def forward_nested(self, x_list: List[torch.Tensor]) -> List[torch.Tensor]:
+ """
+ x_list contains a list of tensors to nest together and run
+ """
+ assert isinstance(self.attn, MemEffAttention)
+
+ if self.training and self.sample_drop_ratio > 0.0:
+
+ def attn_residual_func(x: torch.Tensor, attn_bias=None) -> torch.Tensor:
+ return self.attn(self.norm1(x), attn_bias=attn_bias)
+
+ def ffn_residual_func(x: torch.Tensor, attn_bias=None) -> torch.Tensor:
+ return self.mlp(self.norm2(x))
+
+ x_list = drop_add_residual_stochastic_depth_list(
+ x_list,
+ residual_func=attn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ scaling_vector=self.ls1.grandma if isinstance(self.ls1, LayerScale) else None,
+ )
+ x_list = drop_add_residual_stochastic_depth_list(
+ x_list,
+ residual_func=ffn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ scaling_vector=self.ls2.grandma if isinstance(self.ls1, LayerScale) else None,
+ )
+ return x_list
+ else:
+
+ def attn_residual_func(x: torch.Tensor, attn_bias=None) -> torch.Tensor:
+ return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
+
+ def ffn_residual_func(x: torch.Tensor, attn_bias=None) -> torch.Tensor:
+ return self.ls2(self.mlp(self.norm2(x)))
+
+ attn_bias, x = get_attn_bias_and_cat(x_list)
+ x = x + attn_residual_func(x, attn_bias=attn_bias)
+ x = x + ffn_residual_func(x)
+ return attn_bias.split(x)
+
+ def forward(self, x_or_x_list):
+ if isinstance(x_or_x_list, torch.Tensor):
+ return super().forward(x_or_x_list)
+ elif isinstance(x_or_x_list, list):
+ if not XFORMERS_AVAILABLE:
+ raise AssertionError("xFormers is required for using nested tensors")
+ return self.forward_nested(x_or_x_list)
+ else:
+ raise AssertionError
+
+
+def drop_add_residual_stochastic_depth(
+ x: torch.Tensor,
+ residual_func: Callable[[torch.Tensor], torch.Tensor],
+ sample_drop_ratio: float = 0.0,
+) -> torch.Tensor:
+ # 1) extract subset using permutation
+ b, n, d = x.shape
+ sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
+ brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
+ x_subset = x[brange]
+
+ # 2) apply residual_func to get residual
+ residual = residual_func(x_subset)
+
+ x_flat = x.flatten(1)
+ residual = residual.flatten(1)
+
+ residual_scale_factor = b / sample_subset_size
+
+ # 3) add the residual
+ x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
+ return x_plus_residual.view_as(x)
+
+
+def get_branges_scales(x, sample_drop_ratio=0.0):
+ b, n, d = x.shape
+ sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
+ brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
+ residual_scale_factor = b / sample_subset_size
+ return brange, residual_scale_factor
+
+
+def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
+ if scaling_vector is None:
+ x_flat = x.flatten(1)
+ residual = residual.flatten(1)
+ x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
+ else:
+ x_plus_residual = scaled_index_add(
+ x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
+ )
+ return x_plus_residual
+
+
+attn_bias_cache: Dict[Tuple, Any] = {}
+
+
+def get_attn_bias_and_cat(x_list, branges=None):
+ """
+ this will perform the index select, cat the tensors, and provide the attn_bias from cache
+ """
+ batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
+ all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
+ if all_shapes not in attn_bias_cache.keys():
+ seqlens = []
+ for b, x in zip(batch_sizes, x_list):
+ for _ in range(b):
+ seqlens.append(x.shape[1])
+ attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
+ attn_bias._batch_sizes = batch_sizes
+ attn_bias_cache[all_shapes] = attn_bias
+
+ if branges is not None:
+ cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
+ else:
+ tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
+ cat_tensors = torch.cat(tensors_bs1, dim=1)
+
+ return attn_bias_cache[all_shapes], cat_tensors
+
+
+def drop_add_residual_stochastic_depth_list(
+ x_list: List[torch.Tensor],
+ residual_func: Callable[[torch.Tensor, Any], torch.Tensor],
+ sample_drop_ratio: float = 0.0,
+ scaling_vector=None,
+) -> torch.Tensor:
+ # 1) generate random set of indices for dropping samples in the batch
+ branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
+ branges = [s[0] for s in branges_scales]
+ residual_scale_factors = [s[1] for s in branges_scales]
+
+ # 2) get attention bias and index+concat the tensors
+ attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
+
+ # 3) apply residual_func to get residual, and split the result
+ residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
+
+ outputs = []
+ for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
+ outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
+ return outputs
+
+
+def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
+ if not depth_first and include_root:
+ fn(module=module, name=name)
+ for child_name, child_module in module.named_children():
+ child_name = ".".join((name, child_name)) if name else child_name
+ named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
+ if depth_first and include_root:
+ fn(module=module, name=name)
+ return module
+
+
+class BlockChunk(nn.ModuleList):
+ def forward(self, x):
+ for b in self:
+ x = b(x)
+ return x
+
+
+class DinoVisionTransformer(nn.Module):
+ def __init__(
+ self,
+ img_size=224,
+ patch_size=16,
+ in_chans=3,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ ffn_bias=True,
+ proj_bias=True,
+ drop_path_rate=0.0,
+ drop_path_uniform=False,
+ init_values=None, # for layerscale: None or 0 => no layerscale
+ embed_layer=PatchEmbed,
+ act_layer=nn.GELU,
+ block_fn=Block,
+ ffn_layer="mlp",
+ block_chunks=1,
+ num_register_tokens=0,
+ interpolate_antialias=False,
+ interpolate_offset=0.1,
+ ):
+ """
+ Args:
+ img_size (int, tuple): input image size
+ patch_size (int, tuple): patch size
+ in_chans (int): number of input channels
+ embed_dim (int): embedding dimension
+ depth (int): depth of transformer
+ num_heads (int): number of attention heads
+ mlp_ratio (int): ratio of mlp hidden dim to embedding dim
+ qkv_bias (bool): enable bias for qkv if True
+ proj_bias (bool): enable bias for proj in attn if True
+ ffn_bias (bool): enable bias for ffn if True
+ drop_path_rate (float): stochastic depth rate
+ drop_path_uniform (bool): apply uniform drop rate across blocks
+ weight_init (str): weight init scheme
+ init_values (float): layer-scale init values
+ embed_layer (nn.Module): patch embedding layer
+ act_layer (nn.Module): MLP activation layer
+ block_fn (nn.Module): transformer block class
+ ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
+ block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
+ num_register_tokens: (int) number of extra cls tokens (so-called "registers")
+            interpolate_antialias: (bool) flag to apply anti-aliasing when interpolating positional embeddings
+ interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
+ """
+ super().__init__()
+ norm_layer = partial(nn.LayerNorm, eps=1e-6)
+
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
+ self.num_tokens = 1
+ self.n_blocks = depth
+ self.num_heads = num_heads
+ self.patch_size = patch_size
+ self.num_register_tokens = num_register_tokens
+ self.interpolate_antialias = interpolate_antialias
+ self.interpolate_offset = interpolate_offset
+
+ self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
+ num_patches = self.patch_embed.num_patches
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
+ assert num_register_tokens >= 0
+ self.register_tokens = (
+ nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
+ )
+
+ if drop_path_uniform is True:
+ dpr = [drop_path_rate] * depth
+ else:
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
+
+ if ffn_layer == "mlp":
+ ffn_layer = Mlp
+ elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
+ ffn_layer = SwiGLUFFNFused
+ elif ffn_layer == "identity":
+ def f(*args, **kwargs):
+ return nn.Identity()
+
+ ffn_layer = f
+ else:
+ raise NotImplementedError
+
+ blocks_list = [
+ block_fn(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ proj_bias=proj_bias,
+ ffn_bias=ffn_bias,
+ drop_path=dpr[i],
+ norm_layer=norm_layer,
+ act_layer=act_layer,
+ ffn_layer=ffn_layer,
+ init_values=init_values,
+ )
+ for i in range(depth)
+ ]
+ if block_chunks > 0:
+ self.chunked_blocks = True
+ chunked_blocks = []
+ chunksize = depth // block_chunks
+ for i in range(0, depth, chunksize):
+ # this is to keep the block index consistent if we chunk the block list
+ chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
+ self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
+ else:
+ self.chunked_blocks = False
+ self.blocks = nn.ModuleList(blocks_list)
+
+ self.norm = norm_layer(embed_dim)
+ self.head = nn.Identity()
+
+ self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
+
+ def interpolate_pos_encoding(self, x, w, h):
+ previous_dtype = x.dtype
+ npatch = x.shape[1] - 1
+ N = self.pos_embed.shape[1] - 1
+ if npatch == N and w == h:
+ return self.pos_embed
+ pos_embed = self.pos_embed.float()
+ class_pos_embed = pos_embed[:, 0]
+ patch_pos_embed = pos_embed[:, 1:]
+ dim = x.shape[-1]
+ w0 = w // self.patch_size
+ h0 = h // self.patch_size
+ M = int(math.sqrt(N)) # Recover the number of patches in each dimension
+ assert N == M * M
+ kwargs = {}
+ if self.interpolate_offset:
+ # Historical kludge: add a small number to avoid floating point error in the interpolation, see https://github.com/facebookresearch/dino/issues/8
+ # Note: still needed for backward-compatibility, the underlying operators are using both output size and scale factors
+ sx = float(w0 + self.interpolate_offset) / M
+ sy = float(h0 + self.interpolate_offset) / M
+ kwargs["scale_factor"] = (sx, sy)
+ else:
+ # Simply specify an output size instead of a scale factor
+ kwargs["size"] = (w0, h0)
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.reshape(1, M, M, dim).permute(0, 3, 1, 2),
+ mode="bicubic",
+ antialias=self.interpolate_antialias,
+ **kwargs,
+ )
+ assert (w0, h0) == patch_pos_embed.shape[-2:]
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
+
+ def prepare_tokens_with_masks(self, x, masks=None):
+ B, nc, w, h = x.shape
+ x = self.patch_embed(x)
+ if masks is not None:
+ x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)
+
+ x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
+ x = x + self.interpolate_pos_encoding(x, w, h)
+
+ if self.register_tokens is not None:
+ x = torch.cat(
+ (
+ x[:, :1],
+ self.register_tokens.expand(x.shape[0], -1, -1),
+ x[:, 1:],
+ ),
+ dim=1,
+ )
+
+ return x
+
+ def forward_features_list(self, x_list, masks_list):
+ x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
+ for blk in self.blocks:
+ x = blk(x)
+
+ all_x = x
+ output = []
+ for x, masks in zip(all_x, masks_list):
+ x_norm = self.norm(x)
+ output.append(
+ {
+ "x_norm_clstoken": x_norm[:, 0],
+ "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
+ "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
+ "x_prenorm": x,
+ "masks": masks,
+ }
+ )
+ return output
+
+ def forward_features(self, x, masks=None):
+ if isinstance(x, list):
+ return self.forward_features_list(x, masks)
+
+ x = self.prepare_tokens_with_masks(x, masks)
+
+ for blk in self.blocks:
+ x = blk(x)
+
+ x_norm = self.norm(x)
+ return {
+ "x_norm_clstoken": x_norm[:, 0],
+ "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
+ "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
+ "x_prenorm": x,
+ "masks": masks,
+ }
+
+ def _get_intermediate_layers_not_chunked(self, x, n=1):
+ x = self.prepare_tokens_with_masks(x)
+ # If n is an int, take the n last blocks. If it's a list, take them
+ output, total_block_len = [], len(self.blocks)
+ blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
+ for i, blk in enumerate(self.blocks):
+ x = blk(x)
+ if i in blocks_to_take:
+ output.append(x)
+ assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
+ return output
+
+ def _get_intermediate_layers_chunked(self, x, n=1):
+ x = self.prepare_tokens_with_masks(x)
+ output, i, total_block_len = [], 0, len(self.blocks[-1])
+ # If n is an int, take the n last blocks. If it's a list, take them
+ blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
+ for block_chunk in self.blocks:
+ for blk in block_chunk[i:]: # Passing the nn.Identity()
+ x = blk(x)
+ if i in blocks_to_take:
+ output.append(x)
+ i += 1
+ assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
+ return output
+
+ def get_intermediate_layers(
+ self,
+ x: torch.Tensor,
+ n: Union[int, Sequence] = 1, # Layers or n last layers to take
+ reshape: bool = False,
+ return_class_token: bool = False,
+ norm=True,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
+ if self.chunked_blocks:
+ outputs = self._get_intermediate_layers_chunked(x, n)
+ else:
+ outputs = self._get_intermediate_layers_not_chunked(x, n)
+ if norm:
+ outputs = [self.norm(out) for out in outputs]
+ class_tokens = [out[:, 0] for out in outputs]
+ outputs = [out[:, 1 + self.num_register_tokens :] for out in outputs]
+ if reshape:
+ B, _, w, h = x.shape
+ outputs = [
+ out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
+ for out in outputs
+ ]
+ if return_class_token:
+ return tuple(zip(outputs, class_tokens))
+ return tuple(outputs)
+
+ def forward(self, *args, is_training=False, **kwargs):
+ ret = self.forward_features(*args, **kwargs)
+ if is_training:
+ return ret
+ else:
+ return self.head(ret["x_norm_clstoken"])
+
+
+def vit_small(patch_size=16, num_register_tokens=0, **kwargs):
+ model = DinoVisionTransformer(
+ patch_size=patch_size,
+ embed_dim=384,
+ depth=12,
+ num_heads=6,
+ mlp_ratio=4,
+ block_fn=partial(Block, attn_class=MemEffAttention),
+ num_register_tokens=num_register_tokens,
+ **kwargs,
+ )
+ return model
+
+
+def vit_base(patch_size=16, num_register_tokens=0, **kwargs):
+ model = DinoVisionTransformer(
+ patch_size=patch_size,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4,
+ block_fn=partial(Block, attn_class=MemEffAttention),
+ num_register_tokens=num_register_tokens,
+ **kwargs,
+ )
+ return model
+
+
+def vit_large(patch_size=16, num_register_tokens=0, **kwargs):
+ model = DinoVisionTransformer(
+ patch_size=patch_size,
+ embed_dim=1024,
+ depth=24,
+ num_heads=16,
+ mlp_ratio=4,
+ block_fn=partial(Block, attn_class=MemEffAttention),
+ num_register_tokens=num_register_tokens,
+ **kwargs,
+ )
+ return model
+
+
+def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs):
+ """
+ Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
+ """
+ model = DinoVisionTransformer(
+ patch_size=patch_size,
+ embed_dim=1536,
+ depth=40,
+ num_heads=24,
+ mlp_ratio=4,
+ block_fn=partial(Block, attn_class=MemEffAttention),
+ num_register_tokens=num_register_tokens,
+ **kwargs,
+ )
+ return model
+
+
+class Weights(Enum):
+ LVD142M = "LVD142M"
+
+
+def _make_dinov2_model(
+ *,
+ arch_name: str = "vit_large",
+ img_size: int = 518,
+ patch_size: int = 14,
+ init_values: float = 1.0,
+ ffn_layer: str = "mlp",
+ block_chunks: int = 0,
+ num_register_tokens: int = 0,
+ interpolate_antialias: bool = False,
+ interpolate_offset: float = 0.1,
+ weights: Union[Weights, str] = Weights.LVD142M,
+ **kwargs,
+):
+ if isinstance(weights, str):
+ try:
+ weights = Weights[weights]
+ except KeyError:
+ raise AssertionError(f"Unsupported weights: {weights}")
+
+ vit_kwargs = dict(
+ img_size=img_size,
+ patch_size=patch_size,
+ init_values=init_values,
+ ffn_layer=ffn_layer,
+ block_chunks=block_chunks,
+ num_register_tokens=num_register_tokens,
+ interpolate_antialias=interpolate_antialias,
+ interpolate_offset=interpolate_offset,
+ )
+ vit_kwargs.update(**kwargs)
+ model = sys.modules[__name__].__dict__[arch_name](**vit_kwargs)
+
+ return model
+
+
+def dinov2_vits14(**kwargs):
+ """
+ DINOv2 ViT-S/14 model (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(arch_name="vit_small", **kwargs)
+
+
+def dinov2_vitb14(**kwargs):
+ """
+ DINOv2 ViT-B/14 model (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(arch_name="vit_base", **kwargs)
+
+
+def dinov2_vitl14(**kwargs):
+ """
+ DINOv2 ViT-L/14 model (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(arch_name="vit_large", **kwargs)
+
+
+def dinov2_vitg14(**kwargs):
+ """
+ DINOv2 ViT-g/14 model (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(
+ arch_name="vit_giant2",
+ ffn_layer="swiglufused",
+ **kwargs,
+ )
+
+
+def dinov2_vits14_reg(**kwargs):
+ """
+ DINOv2 ViT-S/14 model with registers (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(
+ arch_name="vit_small",
+ num_register_tokens=4,
+ interpolate_antialias=True,
+ interpolate_offset=0.0,
+ **kwargs,
+ )
+
+
+def dinov2_vitb14_reg(**kwargs):
+ """
+ DINOv2 ViT-B/14 model with registers (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(
+ arch_name="vit_base",
+ num_register_tokens=4,
+ interpolate_antialias=True,
+ interpolate_offset=0.0,
+ **kwargs,
+ )
+
+
+def dinov2_vitl14_reg(**kwargs):
+ """
+ DINOv2 ViT-L/14 model with registers (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(
+ arch_name="vit_large",
+ num_register_tokens=4,
+ interpolate_antialias=True,
+ interpolate_offset=0.0,
+ **kwargs,
+ )
+
+
+def dinov2_vitg14_reg(**kwargs):
+ """
+ DINOv2 ViT-g/14 model with registers (optionally) pretrained on the LVD-142M dataset.
+ """
+ return _make_dinov2_model(
+ arch_name="vit_giant2",
+ ffn_layer="swiglufused",
+ num_register_tokens=4,
+ interpolate_antialias=True,
+ interpolate_offset=0.0,
+ **kwargs,
+ )
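+
+
+# Illustrative usage sketch (editorial note, not part of the upstream DINOv2 API):
+#   model = dinov2_vitl14_reg()                                  # ViT-L/14 with 4 register tokens
+#   out = model.forward_features(torch.randn(1, 3, 518, 518))
+#   patch_tokens = out["x_norm_patchtokens"]                     # shape (1, 1369, 1024)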
diff --git a/tim/models/nvidia_radio/radio/dual_hybrid_vit.py b/tim/models/nvidia_radio/radio/dual_hybrid_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..da30449d526001e7c918ad02cf55ff810aab5270
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/dual_hybrid_vit.py
@@ -0,0 +1,213 @@
+from logging import getLogger
+from typing import Tuple
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from timm.models import register_model
+from timm.models import vision_transformer as tvit
+from timm.models import convnext as tconv
+
+from einops import rearrange
+
+from . import extra_timm_models as et
+
+
+class Fuser(nn.Module):
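+    # Fuses features from a source stream into a target stream: a small conv stack
+    # predicts a residual for the target which is (optionally) gated by a sigmoid.
+    # Both (B, C, H, W) maps and (B, N, C) token sequences (where the last h*w tokens
+    # are spatial) are handled; the output keeps the target's original layout.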
+ def __init__(self, src_dim: int, tgt_dim: int, gated: bool = True):
+ super().__init__()
+ self.gated = gated
+
+ mid_dim = max(src_dim, tgt_dim) * 2
+
+ self.fwd = nn.Sequential(
+ nn.Conv2d(src_dim, mid_dim, kernel_size=3, stride=1, padding=1),
+ nn.GELU(),
+ nn.Conv2d(mid_dim, tgt_dim * (2 if gated else 1), kernel_size=3, stride=1, padding=1),
+ )
+
+ def forward(self, src: torch.Tensor, tgt: torch.Tensor) -> torch.Tensor:
+ if src.ndim == 3:
+ shape = tgt.shape[-2:]
+ else:
+ shape = src.shape[-2:]
+
+ nd = shape[0] * shape[1]
+
+ if src.ndim == 3:
+ src = src[:, -nd:].reshape(src.shape[0], src.shape[2], *shape)
+
+ if tgt.ndim == 3:
+ tgt_pre = tgt[:, :-nd]
+ tgt = tgt[:, -nd:].reshape(tgt.shape[0], tgt.shape[2], *shape)
+ else:
+ tgt_pre = None
+
+ pred = self.fwd(src)
+
+ if self.gated:
+ g, pred = torch.chunk(pred, 2, dim=1)
+
+ g = F.sigmoid(g)
+
+ pred = g * pred
+
+ tgt = tgt + pred
+
+ if tgt_pre is not None:
+ tgt = rearrange(tgt, 'b c h w -> b (h w) c')
+ tgt = torch.cat([tgt_pre, tgt], dim=1)
+
+ return tgt
+
+
+class AttnDownsample(nn.Module):
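+    # Reduces the number of spatial tokens by a factor of window_size**2: a learned
+    # query attends over each window_size x window_size group of tokens and emits a
+    # single token per window, while leading (cls/register) tokens pass through unchanged.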
+ def __init__(self, dim: int, window_size: int, num_heads: int = 16):
+ super().__init__()
+ self.q = nn.Parameter(torch.randn(1, num_heads, 1, dim // num_heads) * 0.01)
+ self.kv = nn.Linear(dim, dim * 2)
+ self.proj = nn.Linear(dim, dim)
+ self.window_size = window_size
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+
+ def forward(self, x: torch.Tensor, twod_shape: Tuple[int, int]) -> torch.Tensor:
+ ntok = twod_shape[0] * twod_shape[1]
+ x_pre = x[:, :-ntok]
+
+ B = x.shape[0]
+ ds_hw = tuple(s // self.window_size for s in twod_shape)
+
+ x_spat = rearrange(
+ x[:, -ntok:],
+ 'b (h d1 w d2) c -> (b h w) (d1 d2) c',
+ h=ds_hw[0], w=ds_hw[1],
+ d1=self.window_size, d2=self.window_size,
+ )
+
+ B, N, C = x_spat.shape
+
+ k, v = self.kv(x_spat).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+
+ q = (self.q * self.scale).expand(B, -1, -1, -1)
+ attn = q @ k.transpose(-2, -1)
+ attn = F.softmax(attn, dim=-1)
+ x = attn @ v
+
+ x = x.transpose(1, 2).reshape(B, C)
+ x = self.proj(x)
+
+ x = rearrange(x, '(b h w) c -> b (h w) c', b=x_pre.shape[0], h=ds_hw[0], w=ds_hw[1])
+
+ x = torch.cat([x_pre, x], dim=1)
+ return x
+
+
+class HybridModel(nn.Module):
+ def __init__(self, vit: tvit.VisionTransformer, conv: tconv.ConvNeXt, pretrained: bool = False,
+ concatenate: bool = False, **kwargs):
+ super().__init__()
+ self.conv = conv
+ self.vit = vit
+ self.concatenate = concatenate
+
+ conv.stages = nn.ModuleList(conv.stages)
+ vit.blocks = nn.ModuleList(vit.blocks)
+
+ self._half_vit_idx = len(vit.blocks) // 2 + 1
+
+ self._half_conv_idx = None
+ x = torch.empty(1, 3, 256, 256)
+ x = self.conv.stem(x)
+ for i in range(len(conv.stages)):
+ x = conv.stages[i](x)
+ if self._half_conv_idx is None and x.shape[-2:] == (16, 16):
+ self._half_conv_idx = i + 1
+ half_conv_dim = x.shape[1]
+ final_conv_dim = x.shape[1]
+
+ self.vit_to_conv_fusion = Fuser(vit.embed_dim, half_conv_dim)
+ self.conv_to_vit_fusion = Fuser(half_conv_dim, vit.embed_dim)
+ self.vit_ds = AttnDownsample(vit.embed_dim, window_size=2)
+
+ embed_dim = vit.embed_dim + (final_conv_dim if concatenate else 0)
+ if not concatenate:
+ self.final_fuse = Fuser(final_conv_dim, vit.embed_dim, gated=False)
+ self.final_block = tvit.Block(embed_dim, num_heads=16)
+
+ self.embed_dim = embed_dim
+
+ @property
+ def patch_size(self):
+ return 32
+
+ @property
+ def no_fsdp_wrap_types(self):
+ return {tvit.VisionTransformer, tconv.ConvNeXt}
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.forward_features(x)
+
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
+ y_vit = self.vit.patch_generator(x)
+
+ for i in range(self._half_vit_idx):
+ y_vit = self.vit.blocks[i](y_vit)
+
+ y_conv = self.conv.stem(x)
+ for i in range(self._half_conv_idx):
+ y_conv = self.conv.stages[i](y_conv)
+
+ y_vit, y_conv = self.conv_to_vit_fusion(y_conv, y_vit), self.vit_to_conv_fusion(y_vit, y_conv)
+
+ y_vit = self.vit_ds(y_vit, y_conv.shape[-2:])
+
+ for i in range(self._half_vit_idx, len(self.vit.blocks)):
+ y_vit = self.vit.blocks[i](y_vit)
+
+ for i in range(self._half_conv_idx, len(self.conv.stages)):
+ y_conv = self.conv.stages[i](y_conv)
+
+ if self.concatenate:
+ y_conv = rearrange(y_conv, 'b c h w -> b (h w) c')
+ # Average pool across the board, and replicate for each cls/register token
+ conv_summary = y_conv.mean(dim=1, keepdim=True).expand(-1, self.vit.patch_generator.num_cls_patches, -1)
+ y_conv = torch.cat([conv_summary, y_conv], dim=1)
+ y = torch.cat([y_vit, y_conv], dim=2)
+ else:
+ y = self.final_fuse(y_conv, y_vit)
+ y = self.final_block(y)
+
+ summary = y[:, :self.vit.patch_generator.num_cls_tokens]
+ features = y[:, self.vit.patch_generator.num_cls_patches:]
+
+ return summary, features
+
+
+@register_model
+def hybrid_base(pretrained=False, concatenate: bool = False, weight_init: str = 'skip', **kwargs):
+ cfg = dict(num_classes=0, **kwargs)
+ conv = tconv.convnextv2_base(pretrained=pretrained, **cfg)
+ vit = tvit.vit_base_patch16_224(pretrained=pretrained, weight_init=weight_init, **cfg)
+
+ return HybridModel(vit, conv, pretrained, concatenate=concatenate)
+
+
+@register_model
+def hybrid_large(pretrained=False, concatenate: bool = False, weight_init: str = 'skip', **kwargs):
+ cfg = dict(num_classes=0, **kwargs)
+ conv = tconv.convnextv2_large(pretrained=pretrained, **cfg)
+ vit = tvit.vit_large_patch16_224(pretrained=pretrained, weight_init=weight_init, **cfg)
+
+ return HybridModel(vit, conv, pretrained, concatenate=concatenate)
+
+
+@register_model
+def hybrid_huge(pretrained=False, concatenate: bool = False, weight_init: str = 'skip', **kwargs):
+ cfg = dict(num_classes=0, **kwargs)
+ conv = tconv.convnextv2_huge(pretrained=pretrained, **cfg)
+ vit = et.vit_huge_patch16_224(pretrained=pretrained, weight_init=weight_init, **cfg)
+
+ return HybridModel(vit, conv, pretrained, concatenate=concatenate)
diff --git a/tim/models/nvidia_radio/radio/enable_cpe_support.py b/tim/models/nvidia_radio/radio/enable_cpe_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f414d171b1ac2bfa43f3968c5a71e6cb54c6774
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/enable_cpe_support.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from typing import List, Optional, Set, Tuple, Union
+from types import MethodType
+
+import torch
+from torch import nn
+
+from timm.models import VisionTransformer, checkpoint_seq
+from timm.models.vision_transformer import Attention, Block
+
+from .feature_normalizer import IntermediateFeatureNormalizerBase, NullIntermediateFeatureNormalizer
+
+from .extra_models import DinoWrapper
+from .vit_patch_generator import ViTPatchGenerator
+from .forward_intermediates import forward_intermediates
+from .dual_hybrid_vit import HybridModel
+from flash_attn import flash_attn_varlen_func
+
+
+def _attn_forward_pack(self: Attention, x: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:
+ N, C = x.shape
+ qkv = self.qkv(x).reshape(N, 3, self.num_heads, self.head_dim).permute(1, 0, 2, 3)
+ q, k, v = qkv.unbind(0)
+ q, k = self.q_norm(q), self.k_norm(k)
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
+
+ x = flash_attn_varlen_func(
+ q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen
+ ).reshape(N, -1)
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+def _block_forward_pack(self: Block, x: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:
+ x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), cu_seqlens)))
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
+ return x
+
+def _forward_cpe_pack(self: VisionTransformer, images: List[torch.Tensor]) -> torch.Tensor:
+ device = images[0].device
+ x = []
+ seqlens = []
+ for image in images:
+ # image: [1, c, H, W] -> x: [n_cls+h*w, D], h=H/p and w=W/p
+ _image = self.patch_generator(image).squeeze(0)
+ x.append(_image)
+ seqlens.append(_image.shape[0])
+
+ x = torch.cat(x, dim=0)
+ seqlens = torch.tensor(seqlens, device=device, dtype=torch.int)
+
+ cu_seqlens = torch.cat([
+ torch.tensor([0], device=device, dtype=torch.int32),
+ torch.cumsum(seqlens, dim=0, dtype=torch.int32)
+ ])
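+    # e.g. (illustrative): two images yielding 196 and 256 tokens give
+    # seqlens = [196, 256] and cu_seqlens = [0, 196, 452], the packed-sequence
+    # boundaries expected by flash_attn_varlen_func.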
+ if getattr(self, 'grad_checkpointing', False) and not torch.jit.is_scripting():
+ for block in self.blocks:
+ x = checkpoint_seq(block, x, cu_seqlens)
+ else:
+ for block in self.blocks:
+ x = block(x, cu_seqlens)
+ x = self.norm(x)
+ return x, cu_seqlens
+
+def _forward_cpe(self: VisionTransformer, x: torch.Tensor) -> torch.Tensor:
+ x = self.patch_generator(x)
+ if getattr(self, 'grad_checkpointing', False) and not torch.jit.is_scripting():
+ x = checkpoint_seq(self.blocks, x)
+ else:
+ x = self.blocks(x)
+ x = self.norm(x)
+ return x
+
+
+def _take_indices(
+ num_blocks: int,
+ n: Optional[Union[int, List[int], Tuple[int]]],
+) -> Tuple[Set[int], int]:
+ if isinstance(n, int):
+ assert n >= 0
+ take_indices = {x for x in range(num_blocks - n, num_blocks)}
+ else:
+ take_indices = {num_blocks + idx if idx < 0 else idx for idx in n}
+ return take_indices, max(take_indices)
+
+
+def _forward_intermediates_cpe(
+ self,
+ x: torch.Tensor,
+ norm: bool = False,
+ **kwargs,
+) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
+ return forward_intermediates(
+ self,
+ patch_extractor=self.patch_generator,
+ num_summary_tokens=self.patch_generator.num_skip,
+ num_cls_tokens=self.patch_generator.num_cls_tokens,
+ norm=self.norm if norm else lambda y: y,
+ x=x,
+ **kwargs,
+ )
+
+
+def _forward_cpe_dinov2(self: DinoWrapper, x: torch.Tensor) -> torch.Tensor:
+ y = _forward_cpe(self.inner, x)
+
+ return y[:, 0], y[:, self.num_summary_tokens:]
+
+
+def _forward_intermediates_cpe_dinov2(self: DinoWrapper, *args, **kwargs):
+ return _forward_intermediates_cpe(self.inner, *args, **kwargs)
+
+
+def _enable_cpe_for_timm_vit(model: VisionTransformer,
+ max_img_size: Union[int, Tuple[int, int]] = 1024,
+ num_cls_tokens: int = 1,
+ pos_dropout: float = 0.1,
+                             register_multiple: Optional[int] = None,
+                             num_registers: Optional[int] = None,
+ support_packing: bool = False,
+):
+ if not isinstance(model, VisionTransformer):
+ raise ValueError("CPE only support for VisionTransformer models!")
+
+ patch_size = model.patch_embed.patch_size[0]
+ embed_dim = model.embed_dim
+ input_dims = model.patch_embed.img_size
+ normalize_patches = not isinstance(model.patch_embed.norm, nn.Identity)
+ cls_token = model.cls_token is not None
+
+ max_img_size = int(round(max_img_size / patch_size) * patch_size)
+
+ patch_generator = ViTPatchGenerator(
+ patch_size=patch_size,
+ embed_dim=embed_dim,
+ input_dims=input_dims,
+ normalize_patches=normalize_patches,
+ cls_token=cls_token,
+ max_input_dims=max_img_size,
+ pos_dropout=pos_dropout,
+ num_cls_tokens=num_cls_tokens,
+ register_multiple=register_multiple,
+ num_registers=num_registers,
+ )
+
+ model.patch_generator = patch_generator
+ model.patch_embed = None
+ model.cls_token = None
+ model.pos_embed = None
+ model.pos_drop = None
+ model.patch_size = patch_size
+ model.num_cls_tokens = num_cls_tokens
+ model.num_registers = patch_generator.num_registers
+
+ model.forward_features = MethodType(_forward_cpe, model)
+ model.forward_intermediates = MethodType(_forward_intermediates_cpe, model)
+ if support_packing:
+ model.forward_features = MethodType(_forward_cpe_pack, model)
+ for block in model.blocks:
+ block.forward = MethodType(_block_forward_pack, block)
+ block.attn.forward = MethodType(_attn_forward_pack, block.attn)
+
+
+def _enable_cpe_for_dv2_reg_vit(model: DinoWrapper,
+ max_img_size: Union[int, Tuple[int, int]] = 1024,
+ num_cls_tokens: int = 1,
+ pos_dropout: float = 0.1,
+                            register_multiple: Optional[int] = None,
+                            num_registers: Optional[int] = None,
+):
+ patch_size = model.patch_size
+ embed_dim = model.embed_dim
+ input_dims = model.inner.patch_embed.patches_resolution
+ normalize_patches = not isinstance(model.inner.patch_embed.norm, nn.Identity)
+ cls_token = True
+
+ max_img_size = int(round(max_img_size / patch_size) * patch_size)
+
+ patch_generator = ViTPatchGenerator(
+ patch_size=patch_size,
+ embed_dim=embed_dim,
+ input_dims=input_dims,
+ normalize_patches=normalize_patches,
+ cls_token=cls_token,
+ max_input_dims=max_img_size,
+ pos_dropout=pos_dropout,
+ num_cls_tokens=num_cls_tokens,
+ register_multiple=register_multiple,
+ num_registers=num_registers,
+ patch_bias=True,
+ )
+
+ inner = model.inner
+ inner.patch_generator = patch_generator
+ inner.patch_embed = None
+ inner.cls_token = None
+ inner.pos_embed = None
+ inner.register_tokens = None
+ inner.patch_size = patch_size
+
+ model.forward_features = MethodType(_forward_cpe_dinov2, model)
+ model.forward_intermediates = MethodType(_forward_intermediates_cpe_dinov2, model)
+
+
+def enable_cpe(model: nn.Module,
+ *args,
+ **kwargs,
+):
+ if isinstance(model, VisionTransformer):
+ _enable_cpe_for_timm_vit(model, *args, **kwargs)
+ elif isinstance(model, DinoWrapper):
+ _enable_cpe_for_dv2_reg_vit(model, *args, **kwargs)
+ elif isinstance(model, HybridModel):
+ _enable_cpe_for_timm_vit(model.vit, *args, **kwargs)
+ else:
+ raise ValueError(f'CPE not supported for this model type: {type(model)}')
diff --git a/tim/models/nvidia_radio/radio/enable_damp.py b/tim/models/nvidia_radio/radio/enable_damp.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f139f8848dab1d487fbb8b8953e036335e11efe
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/enable_damp.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from logging import getLogger
+import math
+import os
+from typing import Dict, List, Optional, Union, Tuple
+from types import MethodType
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.utils import parametrize
+
+
+# For now, don't do anything
+class DAMP(nn.Identity):
+ def __init__(self, std: float):
+ super().__init__()
+ self.std = std
+
+
+def enable_damp(model: nn.Module, std: float):
+ if isinstance(model, (list, tuple)):
+ for m in model:
+ enable_damp(m, std)
+ return
+
+ for name, module in model.named_modules():
+ if isinstance(module, nn.Linear):
+ parametrize.register_parametrization(module, 'weight', DAMP(std))
+
+
+def configure_damp_from_args(model: nn.Module, args):
+ damp = getattr(args, 'damp', None)
+ if damp:
+ enable_damp(model, damp)
diff --git a/tim/models/nvidia_radio/radio/enable_spectral_reparam.py b/tim/models/nvidia_radio/radio/enable_spectral_reparam.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc2d6d10f8809e32b630e5660fd9f1ad35497119
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/enable_spectral_reparam.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from logging import getLogger
+import math
+import os
+from typing import Dict, List, Optional, Union, Tuple
+from types import MethodType
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.utils import parametrize
+from torch.nn.utils.parametrizations import _SpectralNorm
+
+from timm.models.vision_transformer import Attention, Mlp
+
+_EPS = 1e-5
+
+
+class _SNReweight(_SpectralNorm):
+ def __init__(self, weight: torch.Tensor, *args, init_norm_to_current: bool = False, alpha: float = 0.05, version: int = 2, **kwargs):
+ super().__init__(weight, *args, **kwargs)
+
+ self.alpha = alpha
+ self.version = version
+ self.register_buffer('_sn_version', torch.tensor(version))
+
+ if init_norm_to_current:
+ # This will set the numerator to match the denominator, which should preserve the original values
+ init_scale = self._get_sigma(weight, n_power_iterations=20).item()
+ else:
+ init_scale = 1.0
+
+ if version == 1:
+ init_value = init_scale
+ elif version == 2:
+ t = init_scale - alpha
+ if t < _EPS:
+ getLogger("spectral_reparam").warn(f'The initialized spectral norm {init_scale} is too small to be represented. Setting to {_EPS} instead.')
+ t = _EPS
+
+ init_value = math.log(math.exp(t) - 1)
+ else:
+ raise ValueError(f'Unsupported version: {version}')
+
+ # Make 2D so that weight decay gets applied
+ self.scale = nn.Parameter(torch.tensor([[init_value]], dtype=torch.float32, device=weight.device))
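+        # Note: with version 2 the effective weight is W * (softplus(scale) + alpha) / sigma(W),
+        # and init_value = softplus^-1(init_scale - alpha), so with init_norm_to_current=True
+        # the parametrization starts out (approximately) as the identity mapping.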
+
+ # Re-implementing this because we need to make division by sigma safe
+ def _get_sigma(self, weight: torch.Tensor, n_power_iterations: int = None) -> torch.Tensor:
+ if not n_power_iterations:
+ n_power_iterations = self.n_power_iterations
+ if weight.ndim == 1:
+ # Faster and more exact path, no need to approximate anything
+ sigma = weight.norm()
+ else:
+ weight_mat = self._reshape_weight_to_matrix(weight)
+ if self.training:
+ self._power_method(weight_mat, n_power_iterations)
+ # See above on why we need to clone
+ u = self._u.clone(memory_format=torch.contiguous_format)
+ v = self._v.clone(memory_format=torch.contiguous_format)
+ # The proper way of computing this should be through F.bilinear, but
+ # it seems to have some efficiency issues:
+ # https://github.com/pytorch/pytorch/issues/58093
+ sigma = torch.dot(u, torch.mv(weight_mat, v))
+
+ return sigma + self.eps
+
+ def forward(self, weight: torch.Tensor, *args, **kwargs):
+ dtype = weight.dtype
+ sigma = self._get_sigma(weight, *args, **kwargs)
+
+ if self.version == 1:
+ scale = self.scale
+ elif self.version == 2:
+ scale = F.softplus(self.scale) + self.alpha
+ else:
+ raise ValueError(f'Unsupported version: {self.version}')
+
+ scale = scale.float() / sigma.float()
+
+ y = weight * scale
+
+ if dtype in (torch.float16, torch.bfloat16):
+ y = y.to(dtype)
+ return y
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+ version_key = f'{prefix}_sn_version'
+ if version_key not in state_dict:
+ self.version = 1
+ state_dict[version_key] = torch.tensor(1)
+ return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
+
+
+class _ChunkedSNReweight(nn.Module):
+ def __init__(self, weight: torch.Tensor, num_chunks: int, *args, init_norm_to_current: bool = False, **kwargs):
+ super().__init__()
+
+ self.num_chunks = num_chunks
+ parts = weight.split(weight.shape[0] // num_chunks, dim=0)
+
+ self.parts = nn.ModuleList([
+ _SNReweight(p, *args, init_norm_to_current=init_norm_to_current, **kwargs)
+ for p in parts
+ ])
+
+ def forward(self, weight: torch.Tensor, *args, **kwargs):
+ parts = weight.split(weight.shape[0] // self.num_chunks, dim=0)
+
+ parts = [
+ fn(p)
+ for fn, p in zip(self.parts, parts)
+ ]
+
+ return torch.cat(parts, dim=0)
+
+
+class _AttnSNReweight(_ChunkedSNReweight):
+ def __init__(self, weight: torch.Tensor, *args, init_norm_to_current: bool = False, renorm_values: bool = False, **kwargs):
+ super().__init__(weight, 3, *args, init_norm_to_current=init_norm_to_current, **kwargs)
+
+ if not renorm_values:
+ self.parts[2] = nn.Identity()
+
+
+def enable_spectral_reparam(model: Union[nn.Module, List[nn.Module]],
+ n_power_iterations: int = 1,
+ eps: float = 1e-6,
+ init_norm_to_current: bool = False,
+ renorm_values: bool = True,
+ renorm_mlp: bool = True,
+ state_dict_guidance: Optional[Dict[str, torch.Tensor]] = None):
+ if isinstance(model, (list, tuple)):
+ for i, sub in enumerate(model):
+ sub_sd = state_dict_guidance[i] if isinstance(state_dict_guidance, (list, tuple)) else state_dict_guidance
+ enable_spectral_reparam(sub, n_power_iterations=n_power_iterations, eps=eps,
+ init_norm_to_current=init_norm_to_current, renorm_values=renorm_values,
+ renorm_mlp=renorm_mlp, state_dict_guidance=sub_sd)
+ return
+
+ print('Enabling spectral reparametrization')
+ args = dict(n_power_iterations=n_power_iterations, dim=0, eps=eps, init_norm_to_current=init_norm_to_current)
+ visited_prefixes = set()
+
+ def is_guidance_parametrized(name: str):
+ if state_dict_guidance is None:
+ return True
+
+ p_name = f'{name}.parametrizations'
+ is_prm = any(k for k in state_dict_guidance if k.startswith(p_name) and k.endswith('_sn_version'))
+ return is_prm
+
+ def parametrize_linear(linear: nn.Linear):
+ parametrize.register_parametrization(
+ linear,
+ 'weight',
+ _SNReweight(linear.weight, **args)
+ )
+
+ for name, mod in model.named_modules():
+ pref = '.'.join(name.split('.')[:-1])
+ if pref in visited_prefixes:
+ continue
+
+ if isinstance(mod, Attention) or name.endswith('.attn'):
+ if is_guidance_parametrized(f'{name}.qkv'):
+ parametrize.register_parametrization(
+ mod.qkv,
+ 'weight',
+ _AttnSNReweight(mod.qkv.weight, renorm_values=renorm_values, **args),
+ )
+ if hasattr(mod, 'proj') and is_guidance_parametrized(f'{name}.proj'):
+ parametrize_linear(mod.proj)
+ visited_prefixes.add(name)
+ elif name.endswith('mlp') and renorm_mlp and hasattr(mod, 'w12'):
+ if is_guidance_parametrized(f'{name}.w12'):
+ parametrize.register_parametrization(
+ mod.w12,
+ 'weight',
+ _ChunkedSNReweight(mod.w12.weight, num_chunks=2, **args),
+ )
+ if is_guidance_parametrized(f'{name}.w3'):
+ parametrize_linear(mod.w3)
+ visited_prefixes.add(name)
+ elif isinstance(mod, nn.Linear) and 'patch_generator' not in name and is_guidance_parametrized(name):
+ parametrize_linear(mod)
+
+
+def configure_spectral_reparam_from_args(model: nn.Module, args, state_dict_guidance: Optional[Dict[str, torch.Tensor]] = None):
+ spectral_reparam = getattr(args, 'spectral_reparam', False)
+ if isinstance(spectral_reparam, bool) and spectral_reparam:
+ enable_spectral_reparam(model, init_norm_to_current=True, state_dict_guidance=state_dict_guidance)
+ elif isinstance(spectral_reparam, dict):
+ enable_spectral_reparam(
+ model,
+ n_power_iterations=spectral_reparam.get('n_power_iterations', 1),
+ eps=spectral_reparam.get('eps', 1e-12),
+ init_norm_to_current=True,
+ state_dict_guidance=state_dict_guidance,
+ )
+
+
+def disable_spectral_reparam(model: nn.Module):
+ print('Disabling spectral reparametrization')
+ for name, mod in model.named_modules():
+ if parametrize.is_parametrized(mod):
+ parametrize.remove_parametrizations(mod, 'weight')
+ pass
+
+
+
+if __name__ == '__main__':
+ import argparse
+ from . import radio_model as create_model
+
+ parser = argparse.ArgumentParser(description='Remove parametrization from state dict')
+ parser.add_argument('--checkpoint', type=str, required=True, help='The checkpoint to load')
+ parser.add_argument('--output', type=str, default='', help='Where to store the checkpoint')
+ parser.add_argument('--release', default=False, action='store_true', help='Prune extraneous checkpoint fields')
+ parser.add_argument('--strict', default=False, action='store_true', help='Strictly load the state dict')
+
+ args = parser.parse_args()
+
+ if not args.output:
+ chk_dir, chk_name = os.path.split(args.checkpoint)
+ args.output = os.path.join(chk_dir, f'clean_{chk_name}')
+ print(f'Set output to "{args.output}"')
+
+ chk = torch.load(args.checkpoint, map_location='cpu', mmap=True)
+
+ model = create_model.create_model_from_args(chk['args'])
+
+ key = 'base_model.'
+ mod_state = dict()
+ extra_state = dict()
+ for k, v in chk['state_dict'].items():
+ if k.startswith(key):
+ mod_state[k[len(key):]] = v
+ else:
+ extra_state[k] = v
+
+ chk_load_info = model.load_state_dict(mod_state, strict=args.strict)
+ if chk_load_info.unexpected_keys or chk_load_info.missing_keys:
+ print(chk_load_info)
+
+ if chk['args'].spectral_reparam:
+ disable_spectral_reparam(model)
+
+ if hasattr(chk['args'], 'dtype'):
+ model.to(dtype=chk['args'].dtype)
+
+ mod_state = model.state_dict()
+ final_state = dict()
+ final_state.update({f'{key}{k}': v for k, v in mod_state.items()})
+ final_state.update(extra_state)
+
+ chk['state_dict'] = final_state
+ chk['args'].spectral_reparam = False
+
+ if args.release:
+ chk = {
+ 'arch': chk['arch'],
+ 'epoch': chk['epoch'],
+ 'state_dict': chk['state_dict'],
+ 'args': chk['args'],
+ }
+
+ torch.save(chk, args.output)
+ pass
diff --git a/tim/models/nvidia_radio/radio/eradio_model.py b/tim/models/nvidia_radio/radio/eradio_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..61db4898b8b26f82647bfe17314aad3ac7b71b3c
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/eradio_model.py
@@ -0,0 +1,1392 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+# E-RADIO model from
+# Mike Ranzinger, Greg Heinrich, Jan Kautz, and Pavlo Molchanov. "AM-RADIO: Agglomerative Model--Reduce All Domains Into One." arXiv preprint arXiv:2312.06709 (2023).
+
+# based on FasterViT, Swin Transformer, YOLOv8
+
+# FasterViT:
+# Ali Hatamizadeh, Greg Heinrich, Hongxu Yin, Andrew Tao, Jose M. Alvarez, Jan Kautz, and Pavlo Molchanov. "FasterViT: Fast Vision Transformers with Hierarchical Attention." arXiv preprint arXiv:2306.06189 (2023).
+
+import timm
+import torch
+import torch.nn as nn
+from timm.models.registry import register_model
+
+from timm.models.layers import trunc_normal_, DropPath, LayerNorm2d
+import numpy as np
+import torch.nn.functional as F
+import math
+import warnings
+
+#######################
+## Codebase from YOLOv8
+## BEGINNING
+#######################
+
+class C2f(nn.Module):
+ """Faster Implementation of CSP Bottleneck with 2 convolutions."""
+ """From YOLOv8 codebase"""
+ def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, drop_path=None): # ch_in, ch_out, number, shortcut, groups, expansion
+ super().__init__()
+ if drop_path is None:
+ drop_path = [0.0] * n
+
+ self.c = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, 2 * self.c, 1, 1)
+ self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
+ self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0, drop_path=drop_path[i]) for i in range(n))
+
+ def forward(self, x):
+ """Forward pass through C2f layer."""
+ y = list(self.cv1(x).chunk(2, 1))
+ y.extend(m(y[-1]) for m in self.m)
+ return self.cv2(torch.cat(y, 1))
+
+ def forward_split(self, x):
+ """Forward pass using split() instead of chunk()."""
+ y = list(self.cv1(x).split((self.c, self.c), 1))
+ y.extend(m(y[-1]) for m in self.m)
+ return self.cv2(torch.cat(y, 1))
+
+class Bottleneck(nn.Module):
+ """Standard bottleneck."""
+
+ def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5, drop_path=0.0): # ch_in, ch_out, shortcut, groups, kernels, expand
+ super().__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, k[0], 1)
+ self.cv2 = Conv(c_, c2, k[1], 1, g=g)
+ self.add = shortcut and c1 == c2
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+ def forward(self, x):
+ """'forward()' applies the YOLOv5 FPN to input data."""
+ return x + self.drop_path1(self.cv2(self.cv1(x))) if self.add else self.cv2(self.cv1(x))
+
+
+class Conv(nn.Module):
+ """Modified to support layer fusion"""
+ default_act = nn.SiLU() # default activation
+
+ def __init__(self, a, b, kernel_size=1, stride=1, padding=None, g=1, dilation=1, bn_weight_init=1, bias=False, act=True):
+ super().__init__()
+
+ self.conv = torch.nn.Conv2d(a, b, kernel_size, stride, autopad(kernel_size, padding, dilation), dilation, g, bias=False)
+        self.bn = torch.nn.BatchNorm2d(b)
+        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
+        torch.nn.init.constant_(self.bn.bias, 0)
+ self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
+
+
+ def forward(self,x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.act(x)
+ return x
+
+ @torch.no_grad()
+ def switch_to_deploy(self):
+ # return 1
+ if not isinstance(self.bn, nn.Identity):
+ c, bn = self.conv, self.bn
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
+ w = c.weight * w[:, None, None, None]
+ b = bn.bias - bn.running_mean * bn.weight / \
+ (bn.running_var + bn.eps)**0.5
+
+ self.conv.weight.data.copy_(w)
+ self.conv.bias = nn.Parameter(b)
+
+ self.bn = nn.Identity()
+
+def autopad(k, p=None, d=1): # kernel, padding, dilation
+ """Pad to 'same' shape outputs."""
+ if d > 1:
+ k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
+ if p is None:
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
+ return p
+
+
+#######################
+## Codebase from YOLOv8
+## END
+#######################
+
+def pixel_unshuffle(data, factor=2):
+    # performs nn.PixelShuffle(factor) in reverse; torch has ONNX/TRT export issues with PixelUnshuffle, so it is done manually
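+    # Shape sketch (illustrative): (B, C, H, W) -> (B, C * factor**2, H // factor, W // factor).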
+ B, C, H, W = data.shape
+ return data.view(B, C, factor, H//factor, factor, W//factor).permute(0,1,2,4,3,5).reshape(B, -1, H//factor, W//factor)
+
+class SwiGLU(nn.Module):
+    # should be more advanced, but doesn't improve results so far
+ def forward(self, x):
+ x, gate = x.chunk(2, dim=-1)
+ return F.silu(gate) * x
+
+
+def window_partition(x, window_size):
+ """
+    Function for partitioning an image into windows on which windowed attention is later applied
+ Args:
+ x: (B, C, H, W)
+ window_size: window size
+ Returns:
+ windows - local window features (num_windows*B, window_size*window_size, C)
+ (Hp, Wp) - the size of the padded image
+ """
+ B, C, H, W = x.shape
+
+ if window_size == 0 or (window_size==H and window_size==W):
+ windows = x.flatten(2).transpose(1, 2)
+ Hp, Wp = H, W
+ else:
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, (0, pad_w, 0, pad_h), mode="reflect")
+ Hp, Wp = H + pad_h, W + pad_w
+
+ x = x.view(B, C, Hp // window_size, window_size, Wp // window_size, window_size)
+ windows = x.permute(0, 2, 4, 3, 5, 1).reshape(-1, window_size*window_size, C)
+
+ return windows, (Hp, Wp)
+
+class Conv2d_BN(nn.Module):
+ '''
+ Conv2d + BN layer with folding capability to speed up inference
+ Can be merged with Conv() function with additional arguments
+ '''
+ def __init__(self, a, b, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1, bias=False):
+ super().__init__()
+ self.conv = torch.nn.Conv2d(a, b, kernel_size, stride, padding, dilation, groups, bias=False)
+        self.bn = torch.nn.BatchNorm2d(b)
+        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
+        torch.nn.init.constant_(self.bn.bias, 0)
+
+ def forward(self,x):
+ x = self.conv(x)
+ x = self.bn(x)
+ return x
+
+ @torch.no_grad()
+ def switch_to_deploy(self):
+ if not isinstance(self.bn, nn.Identity):
+ c, bn = self.conv, self.bn
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
+ w = c.weight * w[:, None, None, None]
+ b = bn.bias - bn.running_mean * bn.weight / \
+ (bn.running_var + bn.eps)**0.5
+ self.conv.weight.data.copy_(w)
+ self.conv.bias = nn.Parameter(b)
+ self.bn = nn.Identity()
+
+
+
+def window_reverse(windows, window_size, H, W, pad_hw):
+ """
+ Windows to the full feature map
+ Args:
+        windows: local window features (num_windows*B, window_size*window_size, C)
+        window_size: Window size
+        H: Height of image
+        W: Width of image
+        pad_hw: a tuple (Hp, Wp) with the padded image size from the windowing step
+ Returns:
+ x: (B, C, H, W)
+
+ """
+ # print(f"window_reverse, windows.shape {windows.shape}")
+ Hp, Wp = pad_hw
+ if window_size == 0 or (window_size==H and window_size==W):
+ B = int(windows.shape[0] / (Hp * Wp / window_size / window_size))
+ x = windows.transpose(1, 2).view(B, -1, H, W)
+ else:
+ B = int(windows.shape[0] / (Hp * Wp / window_size / window_size))
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+ x = x.permute(0, 5, 1, 3, 2, 4).reshape(B,windows.shape[2], Hp, Wp)
+
+ if Hp > H or Wp > W:
+ x = x[:, :, :H, :W, ].contiguous()
+
+ return x
+
+
+
+class PosEmbMLPSwinv2D(nn.Module):
+ """
+ 2D positional embedding from Swin Transformer v2
+ Added functionality to store the positional embedding in the model and not recompute it every time
+ """
+ def __init__(
+ self, window_size, pretrained_window_size, num_heads, seq_length, no_log=False, cpb_mlp_hidden=512,
+ ):
+ super().__init__()
+ self.window_size = window_size
+ self.num_heads = num_heads
+ # mlp to generate continuous relative position bias
+ self.cpb_mlp = nn.Sequential(
+ nn.Linear(2, cpb_mlp_hidden, bias=True),
+ nn.ReLU(inplace=True),
+ nn.Linear(cpb_mlp_hidden, num_heads, bias=False),
+ )
+
+ self.grid_exists = False
+ self.seq_length = seq_length
+ self.deploy = False
+ self.num_heads = num_heads
+ self.no_log = no_log
+ self.pretrained_window_size = pretrained_window_size
+ self.relative_bias_window_size = window_size
+
+ relative_coords_table, relative_position_index, relative_bias = self.relative_bias_initialization(window_size, num_heads,
+ pretrained_window_size, seq_length,
+ no_log)
+
+ self.register_buffer("relative_coords_table", relative_coords_table)
+ self.register_buffer("relative_position_index", relative_position_index)
+ self.register_buffer("relative_bias", relative_bias) # for EMA
+
+ def relative_bias_initialization(self, window_size, num_heads, pretrained_window_size, seq_length, no_log):
+        # kept as a separate function to support changing the window size after model weights are loaded
+ relative_coords_h = torch.arange(
+ -(window_size[0] - 1), window_size[0], dtype=torch.float32
+ )
+ relative_coords_w = torch.arange(
+ -(window_size[1] - 1), window_size[1], dtype=torch.float32
+ )
+ relative_coords_table = (
+ torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w]))
+ .permute(1, 2, 0)
+ .contiguous()
+ .unsqueeze(0)
+ ) # 1, 2*Wh-1, 2*Ww-1, 2
+ if pretrained_window_size[0] > 0:
+ relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
+ relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
+ else:
+ relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
+ relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
+
+ if not no_log:
+ relative_coords_table *= 8 # normalize to -8, 8
+ relative_coords_table = (
+ torch.sign(relative_coords_table)
+ * torch.log2(torch.abs(relative_coords_table) + 1.0)
+ / np.log2(8)
+ )
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = (
+ coords_flatten[:, :, None] - coords_flatten[:, None, :]
+ ) # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(
+ 1, 2, 0
+ ).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+
+ relative_bias = torch.zeros(1, num_heads, seq_length, seq_length)
+
+ self.relative_bias_window_size = window_size
+
+ return relative_coords_table, relative_position_index, relative_bias
+
+
+ def switch_to_deploy(self):
+ self.deploy = True
+ self.grid_exists = True
+
+ def forward(self, input_tensor):
+ # for efficiency, we want this forward to be folded into a single operation (sum)
+        # if resolution stays the same, then we don't need to recompute MLP layers
+
+ if not self.deploy or self.training:
+ self.grid_exists = False
+
+        # compare whether all elements in self.window_size match those in self.relative_bias_window_size
+ if not all([self.window_size[i] == self.relative_bias_window_size[i] for i in range(len(self.window_size))]):
+ relative_coords_table, relative_position_index, relative_bias = self.relative_bias_initialization(self.window_size, self.num_heads,
+ self.pretrained_window_size, self.seq_length,
+ self.no_log)
+
+ self.relative_coords_table = relative_coords_table.to(self.relative_coords_table.device)
+ self.relative_position_index = relative_position_index.to(self.relative_position_index.device)
+ self.relative_bias = relative_bias.to(self.relative_bias.device)
+
+ if self.deploy and self.grid_exists:
+ input_tensor = input_tensor + self.relative_bias
+ return input_tensor
+
+ if 1:
+ self.grid_exists = True
+
+ relative_position_bias_table = self.cpb_mlp(
+ self.relative_coords_table
+ ).view(-1, self.num_heads)
+ relative_position_bias = relative_position_bias_table[
+ self.relative_position_index.view(-1)
+ ].view(
+ self.window_size[0] * self.window_size[1],
+ self.window_size[0] * self.window_size[1],
+ -1,
+ ) # Wh*Ww,Wh*Ww,nH
+
+ relative_position_bias = relative_position_bias.permute(
+ 2, 0, 1
+ ).contiguous() # nH, Wh*Ww, Wh*Ww
+ relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
+
+ self.relative_bias = relative_position_bias.unsqueeze(0)
+
+ input_tensor = input_tensor + self.relative_bias
+ return input_tensor
+
+
+class GRAAttentionBlock(nn.Module):
+ def __init__(self, window_size, dim_in, dim_out,
+ num_heads, drop_path=0., qk_scale=None, qkv_bias=False,
+ norm_layer=nn.LayerNorm, layer_scale=None,
+ use_swiglu=True,
+ subsample_ratio=1, dim_ratio=1, conv_base=False,
+ do_windowing=True, multi_query=False, use_shift=0,
+ cpb_mlp_hidden=512, conv_groups_ratio=0):
+ '''
+        Global Resolution Attention Block, see README for details
+        Attention with subsampling to get a bigger receptive field
+        conv_base - use conv2d instead of avgpool2d for downsample / upsample
+ '''
+ super().__init__()
+
+ self.shift_size=window_size//2 if use_shift else 0
+
+ self.do_windowing = do_windowing
+ self.subsample_ratio = subsample_ratio
+
+
+
+ if do_windowing:
+ if conv_base:
+ self.downsample_op = nn.Conv2d(dim_in, dim_out, kernel_size=subsample_ratio, stride=subsample_ratio) if subsample_ratio > 1 else nn.Identity()
+
+
+ self.downsample_mixer = nn.Identity()
+ self.upsample_mixer = nn.Identity()
+ self.upsample_op = nn.ConvTranspose2d(dim_in, dim_out, kernel_size=subsample_ratio, stride=subsample_ratio) if subsample_ratio > 1 else nn.Identity()
+ else:
+ self.downsample_op = nn.AvgPool2d(kernel_size=subsample_ratio, stride=subsample_ratio) if subsample_ratio > 1 else nn.Identity()
+ self.downsample_mixer = Conv2d_BN(dim_in, dim_out, kernel_size=1, stride=1) if subsample_ratio > 1 else nn.Identity()
+ self.upsample_mixer = nn.Upsample(scale_factor=subsample_ratio, mode='nearest') if subsample_ratio > 1 else nn.Identity()
+ self.upsample_op = Conv2d_BN(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False) if subsample_ratio > 1 else nn.Identity()
+
+
+ # in case there is no downsampling conv we want to have it separately
+ # will help with information propagation between windows
+ if subsample_ratio == 1:
+ # conv_groups_ratio=0
+ self.pre_conv = Conv2d_BN(dim_in, dim_in, kernel_size=3, stride=1, padding=1, groups=max(1,int(conv_groups_ratio*dim_in)), bias=False)
+ # self.pre_conv = nn.Conv2d(dim_in, dim_in, kernel_size=3, stride=1, padding=1, groups=max(1,int(conv_groups_ratio*dim_in)), bias=False)
+ # self.pre_conv_act = nn.ReLU6()
+ #for simplicity:
+ self.pre_conv_act = nn.Identity()
+ if conv_groups_ratio == -1:
+ self.pre_conv = nn.Identity()
+ self.pre_conv_act = nn.Identity()
+
+ self.window_size = window_size
+
+ self.norm1 = norm_layer(dim_in)
+
+ self.attn = WindowAttention(
+ dim_in,
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ resolution=window_size,
+ seq_length=window_size**2, dim_out=dim_in, multi_query=multi_query,
+ shift_size=self.shift_size, cpb_mlp_hidden=cpb_mlp_hidden)
+
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+ use_layer_scale = layer_scale is not None and type(layer_scale) in [int, float]
+ self.gamma1 = nn.Parameter(layer_scale * torch.ones(dim_in)) if use_layer_scale else 1
+
+ ### mlp layer
+ mlp_ratio = 4
+ self.norm2 = norm_layer(dim_in)
+ mlp_hidden_dim = int(dim_in * mlp_ratio)
+
+ activation = nn.GELU if not use_swiglu else SwiGLU
+ mlp_hidden_dim = int((4 * dim_in * 1 / 2) / 64) * 64 if use_swiglu else mlp_hidden_dim
+
+ self.mlp = Mlp(in_features=dim_in, hidden_features=mlp_hidden_dim, act_layer=activation, use_swiglu=use_swiglu)
+
+ self.gamma2 = nn.Parameter(layer_scale * torch.ones(dim_in)) if layer_scale else 1
+ self.drop_path2=DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+
+ def forward(self, x):
+ skip_connection = x
+ attn_mask = None
+
+ # in case there is no downsampling conv we want to have it separately
+ # will help with information propagation
+ if self.subsample_ratio == 1:
+ x = self.pre_conv_act(self.pre_conv(x)) + skip_connection
+
+ if self.do_windowing:
+ # performing windowing if required
+ x = self.downsample_op(x)
+ x = self.downsample_mixer(x)
+
+ if self.window_size>0:
+ H, W = x.shape[2], x.shape[3]
+
+ if self.shift_size > 0 and H>self.window_size and W>self.window_size:
+                # swin-like cyclic shift, doesn't show better performance
+ x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(2, 3))
+
+ x, pad_hw = window_partition(x, self.window_size)
+
+ if self.shift_size > 0 and H>self.window_size and W>self.window_size:
+                    # set the attention matrix to -100 in the top-right square, i.e.
+                    # attn[:, :, :-self.shift_size, -self.shift_size:] = -100.0
+                    # calculate attention mask for SW-MSA
+                    # not used in the final version; can be useful in some cases, especially at high resolution
+ H, W = pad_hw
+ img_mask = torch.zeros((1, H, W, 1), device=x.device) # 1 H W 1
+ h_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ w_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ cnt = 0
+ for h in h_slices:
+ for w in w_slices:
+ img_mask[:, h, w, :] = cnt
+ cnt += 1
+ img_mask = img_mask.transpose(1,2).transpose(1,3)
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
+
+ mask_windows = mask_windows[0].view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+ # window attention
+ x = x + self.drop_path1(self.gamma1*self.attn(self.norm1(x), attn_mask=attn_mask)) # or pass H,W
+ # mlp layer
+ x = x + self.drop_path2(self.gamma2*self.mlp(self.norm2(x)))
+
+ if self.do_windowing:
+ if self.window_size > 0:
+ x = window_reverse(x, self.window_size, H, W, pad_hw)
+
+ # reverse cyclic shift
+ if self.shift_size > 0 and H>self.window_size and W>self.window_size:
+ # @swin like cyclic shift, not tested
+ x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(2, 3))
+
+ x = self.upsample_mixer(x)
+ x = self.upsample_op(x)
+
+
+ if x.shape[2] != skip_connection.shape[2] or x.shape[3] != skip_connection.shape[3]:
+ x = torch.nn.functional.pad(x, ( 0, -x.shape[3] + skip_connection.shape[3], 0, -x.shape[2] + skip_connection.shape[2]), mode="reflect")
+ # need to add skip connection because downsampling and upsampling will break residual connection
+ # 0.5 is needed to make sure that the skip connection is not too strong
+ # in case of no downsample / upsample we can show that 0.5 compensates for the residual connection
+ x = 0.5 * x + 0.5 * skip_connection
+ return x
+
+
+
+
+class MultiResolutionAttention(nn.Module):
+ """
+ MultiResolutionAttention (MRA) module
+    The idea is to use multiple attention blocks at different resolutions
+    Feature maps are downsampled / upsampled before each attention block as needed
+ Every attention block supports windowing
+ """
+
+ def __init__(self, window_size, sr_ratio,
+ dim, dim_ratio, num_heads,
+ do_windowing=True,
+ layer_scale=1e-5, norm_layer=nn.LayerNorm,
+ drop_path = 0, qkv_bias=False, qk_scale=1.0,
+ use_swiglu=True, multi_query=False, conv_base=False,
+ use_shift=0, cpb_mlp_hidden=512, conv_groups_ratio=0) -> None:
+ """
+ Args:
+ input_resolution: input image resolution
+ window_size: window size
+ compression_ratio: compression ratio
+ max_depth: maximum depth of the GRA module
+ use_shift: do window shifting
+ """
+ super().__init__()
+
+ depth = len(sr_ratio)
+
+ self.attention_blocks = nn.ModuleList()
+
+
+ for i in range(depth):
+ subsample_ratio = sr_ratio[i]
+ if len(window_size) > i:
+ window_size_local = window_size[i]
+ else:
+ window_size_local = window_size[0]
+
+ self.attention_blocks.append(GRAAttentionBlock(window_size=window_size_local,
+ dim_in=dim, dim_out=dim, num_heads=num_heads,
+ qkv_bias=qkv_bias, qk_scale=qk_scale, norm_layer=norm_layer,
+ layer_scale=layer_scale, drop_path=drop_path,
+ use_swiglu=use_swiglu, subsample_ratio=subsample_ratio, dim_ratio=dim_ratio,
+ do_windowing=do_windowing, multi_query=multi_query, conv_base=conv_base,
+ use_shift=use_shift, cpb_mlp_hidden=cpb_mlp_hidden, conv_groups_ratio=conv_groups_ratio),
+ )
+
+ def forward(self, x):
+
+ for attention_block in self.attention_blocks:
+ x = attention_block(x)
+
+ return x
+
+
+
+class Mlp(nn.Module):
+ """
+ Multi-Layer Perceptron (MLP) block
+ """
+
+ def __init__(self,
+ in_features,
+ hidden_features=None,
+ out_features=None,
+ act_layer=nn.GELU,
+ use_swiglu=True,
+ drop=0.):
+ """
+ Args:
+ in_features: input features dimension.
+ hidden_features: hidden features dimension.
+ out_features: output features dimension.
+ act_layer: activation function.
+ drop: dropout rate.
+ """
+
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features * (2 if use_swiglu else 1), bias=False)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
+
+ def forward(self, x):
+ x_size = x.size()
+ x = x.view(-1, x_size[-1])
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.fc2(x)
+ x = x.view(x_size)
+ return x
+
+class Downsample(nn.Module):
+    """
+    Down-sampling block.
+    Pixel Unshuffle can be used for down-sampling: it works great accuracy-wise but takes ~10% more TRT time.
+    """
+
+ def __init__(self,
+ dim,
+ shuffle = False,
+ ):
+        """
+        Args:
+            dim: feature size dimension.
+            shuffle: use PixelUnshuffle followed by a 1x1 conv for down-sampling instead of a strided conv.
+        """
+
+ super().__init__()
+ dim_out = 2 * dim
+
+ if shuffle:
+ self.norm = lambda x: pixel_unshuffle(x, factor=2)
+ self.reduction = Conv2d_BN(dim*4, dim_out, 1, 1, 0, bias=False)
+            # pixel unshuffling works well but doesn't provide any speedup
+ else:
+            # LayerNorm is removed here: in this formulation we get ~10% better speed.
+            # LayerNorm on high-resolution inputs is costly because it normalizes over the entire spatial dimension,
+            # therefore we remove it compared to the original implementation in FasterViT.
+ self.norm = nn.Identity()
+ self.reduction = Conv2d_BN(dim, dim_out, 3, 2, 1, bias=False)
+
+
+ def forward(self, x):
+ x = self.norm(x)
+ x = self.reduction(x)
+ return x
+
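+# Hedged shape sketch for Downsample: channels double and resolution halves, e.g.
+#   (B, dim, H, W) -> (B, 2 * dim, H / 2, W / 2)
+# either via a stride-2 conv or via PixelUnshuffle(2) followed by a 1x1 conv.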
+
+class PatchEmbed(nn.Module):
+ """
+ Patch embedding block
+ Used to convert image into an initial set of feature maps with lower resolution
+ """
+
+ def __init__(self, in_chans=3, in_dim=64, dim=96, shuffle_down=False):
+ """
+ Args:
+ in_chans: number of input channels.
+ in_dim: intermediate feature size dimension to speed up stem.
+ dim: final stem channel number
+ shuffle_down: use PixelUnshuffle for down-sampling, effectively increases the receptive field
+ """
+
+ super().__init__()
+ # shuffle_down = False
+ if not shuffle_down:
+ self.proj = nn.Identity()
+ self.conv_down = nn.Sequential(
+ Conv2d_BN(in_chans, in_dim, 3, 2, 1, bias=False),
+ nn.ReLU(),
+ Conv2d_BN(in_dim, dim, 3, 2, 1, bias=False),
+ nn.ReLU()
+ )
+ else:
+ self.proj = lambda x: pixel_unshuffle(x, factor=4)
+ self.conv_down = nn.Sequential(Conv2d_BN(in_chans*16, dim, 3, 1, 1),
+ nn.ReLU(),
+ )
+
+ def forward(self, x):
+ x = self.proj(x)
+ x = self.conv_down(x)
+ return x
+
+
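+# Hedged shape sketch for PatchEmbed: both paths reduce the spatial resolution by 4x, e.g.
+#   (B, 3, 224, 224) -> (B, dim, 56, 56)
+# either via two stride-2 convs or via PixelUnshuffle(4) followed by a stride-1 conv.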
+
+class ConvBlock(nn.Module):
+    """
+    Convolutional block, used in the first couple of stages.
+    We experimented with plain ResNet-18-like modules; they are the best in terms of throughput.
+    In the end, the YOLOv8 idea works fine (a ResNet-18-like block with a squeezed feature dimension and feature concatenation at the end).
+    """
+ def __init__(self, dim,
+ drop_path=0.,
+ layer_scale=None,
+ kernel_size=3,
+ ):
+ super().__init__()
+
+ self.conv1 = Conv2d_BN(dim, dim, kernel_size=kernel_size, stride=1, padding=1)
+ self.act1 = nn.GELU()
+
+ self.conv2 = Conv2d_BN(dim, dim, kernel_size=kernel_size, stride=1, padding=1)
+
+ self.layer_scale = layer_scale
+ if layer_scale is not None and type(layer_scale) in [int, float]:
+ self.gamma = nn.Parameter(layer_scale * torch.ones(dim))
+ self.layer_scale = True
+ else:
+ self.layer_scale = False
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+ def forward(self, x):
+ input = x
+
+ x = self.conv1(x)
+ x = self.act1(x)
+ x = self.conv2(x)
+
+ if self.layer_scale:
+ x = x * self.gamma.view(1, -1, 1, 1)
+ x = input + self.drop_path(x)
+ return x
+
+
+class WindowAttention(nn.Module):
+    # Windowed attention from SwinV2.
+    # Uses an MLP trick for the relative positional bias to handle various input image resolutions,
+    # which can later be folded into a precomputed buffer to improve speed.
+
+ def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, resolution=0,
+ seq_length=0, dim_out=None, multi_query=False, shift_size=0, cpb_mlp_hidden=512):
+ # taken from EdgeViT and tweaked with attention bias.
+ super().__init__()
+ if not dim_out: dim_out = dim
+ self.shift_size = shift_size
+ self.multi_query = multi_query
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.head_dim = dim // num_heads
+
+ self.dim_internal = dim
+
+ self.scale = qk_scale or head_dim ** -0.5
+ if not multi_query:
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ else:
+ self.qkv = nn.Linear(dim, dim + 2*self.head_dim, bias=qkv_bias)
+
+ self.proj = nn.Linear(dim, dim_out, bias=False)
+ # attention positional bias
+ self.pos_emb_funct = PosEmbMLPSwinv2D(window_size=[resolution, resolution],
+ pretrained_window_size=[resolution, resolution],
+ num_heads=num_heads,
+ seq_length=seq_length,
+ cpb_mlp_hidden=cpb_mlp_hidden)
+
+ self.resolution = resolution
+
+ def forward(self, x, attn_mask = None):
+ B, N, C = x.shape
+
+ if not self.multi_query:
+ qkv = self.qkv(x).reshape(B, -1, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2]
+ else:
+ qkv = self.qkv(x)
+ (q, k, v) = qkv.split([self.dim_internal, self.head_dim, self.head_dim], dim=2)
+
+ q = q.reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
+ k = k.reshape(B, -1, 1, C // self.num_heads).permute(0, 2, 1, 3)
+ v = v.reshape(B, -1, 1, C // self.num_heads).permute(0, 2, 1, 3)
+
+ attn = (q @ k.transpose(-2, -1)) * self.scale
+
+ attn = self.pos_emb_funct(attn)
+
+ #add window shift
+ if attn_mask is not None:
+ nW = attn_mask.shape[0]
+ attn = attn.view(B // nW, nW, self.num_heads, N, N) + attn_mask.unsqueeze(1).unsqueeze(0)
+ attn = attn.view(-1, self.num_heads, N, N)
+
+ attn = attn.softmax(dim=-1)
+ x = (attn @ v).transpose(1, 2).reshape(B, -1, C)
+ x = self.proj(x)
+ return x
+
+
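+# Hedged shape note for WindowAttention: it runs on flattened window tokens, e.g. x of shape
+# (B * num_windows, window_size**2, dim). With multi_query=True the keys/values collapse to a
+# single head, so the qkv projection outputs dim + 2 * head_dim channels instead of 3 * dim.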
+
+class ERADIOLayer(nn.Module):
+ """
+ E-RADIO Layer
+ """
+
+ def __init__(self,
+ dim,
+ depth,
+ num_heads,
+ window_size,
+ conv=False,
+ downsample=True,
+ mlp_ratio=4.,
+ qkv_bias=False,
+ qk_scale=None,
+ norm_layer=nn.LayerNorm,
+ drop_path=0.,
+ layer_scale=None,
+ layer_scale_conv=None,
+ sr_dim_ratio=1,
+ sr_ratio=1,
+ multi_query=False,
+ use_swiglu=True,
+ yolo_arch=False,
+ downsample_shuffle=False,
+ conv_base=False,
+ use_shift=False,
+ cpb_mlp_hidden=512,
+ conv_groups_ratio=0,
+ verbose: bool = True,
+
+ ):
+ """
+ Args:
+ dim: feature size dimension.
+ depth: number of layers in each stage.
+ input_resolution: input image resolution.
+ window_size: window size in each stage.
+ downsample: bool argument for down-sampling.
+ mlp_ratio: MLP ratio.
+ num_heads: number of heads in each stage.
+ qkv_bias: bool argument for query, key, value learnable bias.
+ qk_scale: bool argument to scaling query, key.
+ drop: dropout rate.
+ attn_drop: attention dropout rate.
+ drop_path: drop path rate.
+ norm_layer: normalization layer.
+ layer_scale: layer scaling coefficient.
+            use_shift: Swin-like window shifting by half the window size on every alternating layer (considering multi-resolution)
+            conv_groups_ratio: group ratio for the conv used when there is no subsampling in multi-resolution attention
+ """
+
+ super().__init__()
+ self.conv = conv
+ self.yolo_arch=False
+ self.verbose = verbose
+ if conv:
+ if not yolo_arch:
+ self.blocks = nn.ModuleList([
+ ConvBlock(dim=dim,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ layer_scale=layer_scale_conv)
+ for i in range(depth)])
+ self.blocks = nn.Sequential(*self.blocks)
+ else:
+ self.blocks = C2f(dim,dim,n=depth,shortcut=True,e=0.5)
+ self.yolo_arch=True
+ else:
+ if not isinstance(window_size, list): window_size = [window_size]
+ self.window_size = window_size[0]
+ self.do_single_windowing = True
+ if not isinstance(sr_ratio, list): sr_ratio = [sr_ratio]
+ self.sr_ratio = sr_ratio
+ if any([sr!=1 for sr in sr_ratio]) or len(set(window_size))>1:
+ self.do_single_windowing = False
+ do_windowing = True
+ else:
+ self.do_single_windowing = True
+ do_windowing = False
+
+ #for v2_2
+ if conv_groups_ratio != -1:
+ self.do_single_windowing = False
+ do_windowing = True
+
+ self.blocks = nn.ModuleList()
+ for i in range(depth):
+ self.blocks.append(
+ MultiResolutionAttention(window_size=window_size,
+ sr_ratio=sr_ratio,
+ dim=dim,
+ dim_ratio = sr_dim_ratio,
+ num_heads=num_heads,
+ norm_layer=norm_layer,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ layer_scale=layer_scale,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ use_swiglu=use_swiglu,
+ do_windowing=do_windowing,
+ multi_query=multi_query,
+ conv_base=conv_base,
+ cpb_mlp_hidden=cpb_mlp_hidden,
+ use_shift =0 if ((not use_shift) or ((i) % 2 == 0)) else True ,
+ conv_groups_ratio=conv_groups_ratio,
+ ))
+ self.blocks = nn.Sequential(*self.blocks)
+
+ self.transformer = not conv
+ self.downsample = None if not downsample else Downsample(dim=dim, shuffle=downsample_shuffle)
+
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+
+        # do padding / interpolation for the transformer
+ interpolate = True
+ if self.transformer and interpolate:
+ # Windowed Attention will split feature map into windows with the size of window_size x window_size
+ # if the resolution is not divisible by window_size, we need to interpolate the feature map
+ # can be done via padding, but doing so after training hurts the model performance.
+ # interpolation affects the performance as well, but not as much as padding
+ if isinstance(self.window_size, list) or isinstance(self.window_size, tuple):
+ current_max_window_size = max(self.window_size)
+ else:
+ current_max_window_size = self.window_size
+
+ max_window_size = max([res_upsample*current_max_window_size for res_upsample in self.sr_ratio])
+ if H % max_window_size != 0 or W % max_window_size != 0:
+ new_h = int(np.ceil(H/max_window_size)*max_window_size)
+ new_w = int(np.ceil(W/max_window_size)*max_window_size)
+ x = F.interpolate(x, size=(new_h, new_w), mode='nearest')
+ if self.verbose:
+                    warnings.warn(f"Chosen window size is not optimal for the given resolution. Feature maps will be interpolated, which can affect performance. Max window size is {max_window_size}, feature map size is {H}x{W}, interpolated feature map size is {new_h}x{new_w}.")
+
+
+ if self.transformer and self.do_single_windowing:
+ H, W = x.shape[2], x.shape[3]
+ x, pad_hw = window_partition(x, self.window_size)
+
+ #run main blocks
+ x = self.blocks(x)
+
+ if self.transformer and self.do_single_windowing:
+ x = window_reverse(x, self.window_size, H, W, pad_hw)
+
+ if self.transformer and interpolate:
+            # let's keep the original resolution; it might not be ideal, but the upsampling tower needs the expected resolution.
+ x = F.interpolate(x, size=(H, W), mode='nearest')
+
+ if self.downsample is None:
+ return x, x
+
+        return self.downsample(x), x  # also return the pre-downsample features
+
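+# Hedged sketch of the per-stage contract of ERADIOLayer.forward (assumed shapes):
+#   x_next, x_pre = level(x)   # x: (B, C, H, W)
+#   x_pre : (B, C, H, W) features before down-sampling, consumed by HiResNeck
+#   x_next: (B, 2C, H/2, W/2) when a Downsample is attached, otherwise identical to x_pre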
+
+class InterpolateLayer(nn.Module):
+ def __init__(self, size=None, scale_factor=None, mode='nearest'):
+ super(InterpolateLayer, self).__init__()
+ self.size = size
+ self.scale_factor = scale_factor
+ self.mode = mode
+
+ def forward(self, x):
+ return F.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode)
+
+
+class HiResNeck(nn.Module):
+ """
+ The block is used to output dense features from all stages
+ Otherwise, by default, only the last stage features are returned with E-RADIO
+ """
+ def __init__(self, dim, depths, neck_start_stage, full_features_head_dim, downsample_enabled):
+
+ '''
+ Hi Resolution neck to support output of high res features that are useful for dense tasks.
+ depths - total number of layers in the base model
+ neck_start_stage - when to start the neck, 0 - start from the first stage, 1 - start from the second stage etc.
+ earlier layers result in higher resolution features at the cost of compute
+ full_features_head_dim - number of channels in the dense features head
+ '''
+ super().__init__()
+ # create feature projection layers for segmentation output
+ self.neck_features_proj = nn.ModuleList()
+ self.neck_start_stage = neck_start_stage
+ upsample_ratio = 1
+ for i in range(len(depths)):
+ level_n_features_output = int(dim * 2 ** i)
+
+ if self.neck_start_stage > i: continue
+
+ if (upsample_ratio > 1) or full_features_head_dim!=level_n_features_output:
+ feature_projection = nn.Sequential()
+ if False:
+ feature_projection.add_module("norm",nn.BatchNorm2d(level_n_features_output)) #fast, but worse
+ feature_projection.add_module("dconv", nn.ConvTranspose2d(level_n_features_output,
+ full_features_head_dim, kernel_size=upsample_ratio, stride=upsample_ratio))
+ else:
+ # B, in_channels, H, W -> B, in_channels, H*upsample_ratio, W*upsample_ratio
+ # print("upsample ratio", upsample_ratio, level_n_features_output, level_n_features_output)
+ feature_projection.add_module("upsample", InterpolateLayer(scale_factor=upsample_ratio, mode='nearest'))
+ feature_projection.add_module("conv1", nn.Conv2d(level_n_features_output, level_n_features_output, kernel_size=3, stride=1, padding=1, groups=level_n_features_output))
+ feature_projection.add_module("norm",nn.BatchNorm2d(level_n_features_output))
+ # B, in_channels, H*upsample_ratio, W*upsample_ratio -> B, full_features_head_dim, H*upsample_ratio, W*upsample_ratio
+ feature_projection.add_module("conv2", nn.Conv2d(level_n_features_output, full_features_head_dim, kernel_size=1, stride=1, padding=0))
+ else:
+ feature_projection = nn.Sequential()
+
+ self.neck_features_proj.append(feature_projection)
+
+ if i>0 and downsample_enabled[i]:
+ upsample_ratio *= 2
+
+ def forward(self, x, il_level=-1, full_features=None):
+ if self.neck_start_stage > il_level:
+ return full_features
+
+ if full_features is None:
+ full_features = self.neck_features_proj[il_level - self.neck_start_stage](x)
+ else:
+ #upsample torch tensor x to match full_features size, and add to full_features
+ feature_projection = self.neck_features_proj[il_level - self.neck_start_stage](x)
+ if feature_projection.shape[2] != full_features.shape[2] or feature_projection.shape[3] != full_features.shape[3]:
+ feature_projection = torch.nn.functional.pad(feature_projection, ( 0, -feature_projection.shape[3] + full_features.shape[3], 0, -feature_projection.shape[2] + full_features.shape[2]))
+ full_features = full_features + feature_projection
+ return full_features
+
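+# Hedged sketch of how the neck is driven from ERADIO.forward_features (names are illustrative):
+#   full = None
+#   for il, level in enumerate(levels):
+#       x, pre = level(x)
+#       full = neck(pre, il, full)   # stage features are projected, upsampled and accumulated
+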
+class ERADIO(nn.Module):
+ """
+ Efficient RADIO
+ """
+
+ def __init__(self,
+ dim,
+ in_dim,
+ depths,
+ window_size,
+ mlp_ratio,
+ num_heads,
+ drop_path_rate=0.2,
+ in_chans=3,
+ num_classes=1000,
+ qkv_bias=False,
+ qk_scale=None,
+ layer_scale=None,
+ layer_scale_conv=None,
+ layer_norm_last=False,
+ sr_ratio = [1, 1, 1, 1],
+ max_depth = -1,
+ conv_base=False,
+ use_swiglu=False,
+ multi_query=False,
+ norm_layer=nn.LayerNorm,
+ drop_uniform=False,
+ yolo_arch=False,
+ shuffle_down=False,
+ downsample_shuffle=False,
+ return_full_features=False,
+ full_features_head_dim=128,
+ neck_start_stage=1,
+ use_neck=False,
+ use_shift=False,
+ cpb_mlp_hidden=512,
+ conv_groups_ratio=0,
+ verbose: bool = False,
+ **kwargs):
+ """
+ Args:
+ dim: feature size dimension.
+ depths: number of layers in each stage.
+ window_size: window size in each stage.
+ mlp_ratio: MLP ratio.
+ num_heads: number of heads in each stage.
+ drop_path_rate: drop path rate.
+ in_chans: number of input channels.
+ num_classes: number of classes.
+ qkv_bias: bool argument for query, key, value learnable bias.
+ qk_scale: bool argument to scaling query, key.
+ drop_rate: dropout rate.
+ attn_drop_rate: attention dropout rate.
+ norm_layer: normalization layer.
+ layer_scale: layer scaling coefficient.
+ return_full_features: output dense features as well as logits
+ full_features_head_dim: number of channels in the dense features head
+            neck_start_stage: stage id at which to start the full-feature neck. The model has 4 stages, index starts at 0.
+                for 224 resolution, the output of the stage before downsample:
+                stage 0: 56x56, stage 1: 28x28, stage 2: 14x14, stage 3: 7x7
+            use_neck: use the neck even when only the summary embedding is required
+            use_shift: Swin-like window shifting, but without masking attention
+ conv_groups_ratio: will be used for conv blocks where there is no multires attention,
+ if 0 then normal conv,
+ if 1 then channels are independent,
+ if -1 then no conv at all
+
+ """
+ super().__init__()
+
+ num_features = int(dim * 2 ** (len(depths) - 1))
+ self.num_classes = num_classes
+ self.patch_embed = PatchEmbed(in_chans=in_chans, in_dim=in_dim, dim=dim, shuffle_down=shuffle_down)
+ # set return_full_features true if we want to return full features from all stages
+ self.return_full_features = return_full_features
+ self.use_neck = use_neck
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
+ if drop_uniform:
+ dpr = [drop_path_rate for x in range(sum(depths))]
+
+ if not isinstance(max_depth, list): max_depth = [max_depth] * len(depths)
+
+ self.levels = nn.ModuleList()
+ for i in range(len(depths)):
+ conv = True if (i == 0 or i == 1) else False
+
+ level = ERADIOLayer(dim=int(dim * 2 ** i),
+ depth=depths[i],
+ num_heads=num_heads[i],
+ window_size=window_size[i],
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ conv=conv,
+ drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
+ downsample=(i < len(depths) - 1),
+ layer_scale=layer_scale,
+ layer_scale_conv=layer_scale_conv,
+ sr_ratio=sr_ratio[i],
+ use_swiglu=use_swiglu,
+ multi_query=multi_query,
+ norm_layer=norm_layer,
+ yolo_arch=yolo_arch,
+ downsample_shuffle=downsample_shuffle,
+ conv_base=conv_base,
+ cpb_mlp_hidden=cpb_mlp_hidden,
+ use_shift=use_shift,
+ conv_groups_ratio=conv_groups_ratio,
+ verbose=verbose)
+
+ self.levels.append(level)
+
+ if self.return_full_features or self.use_neck:
+ #num_heads
+ downsample_enabled = [self.levels[i-1].downsample is not None for i in range(len(self.levels))]
+ self.high_res_neck = HiResNeck(dim, depths, neck_start_stage, full_features_head_dim, downsample_enabled)
+
+ self.switched_to_deploy = False
+
+ self.norm = LayerNorm2d(num_features) if layer_norm_last else nn.BatchNorm2d(num_features)
+ self.avgpool = nn.AdaptiveAvgPool2d(1)
+ self.head = nn.Linear(num_features, num_classes) if num_classes > 0 else nn.Identity()
+ self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+ elif isinstance(m, LayerNorm2d):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.ones_(m.weight)
+ nn.init.zeros_(m.bias)
+
+ @torch.jit.ignore
+ def no_weight_decay_keywords(self):
+ return {'rpb'}
+
+ def forward_features(self, x):
+ _, _, H, W = x.shape
+ if H % 32 != 0 or W % 32 != 0:
+ raise ValueError(f"E-RADIO requires input dimensions to be divisible by 32 but got H x W: {H} x {W}")
+ x = self.patch_embed(x)
+ full_features = None
+ for il, level in enumerate(self.levels):
+ x, pre_downsample_x = level(x)
+
+ if self.return_full_features or self.use_neck:
+ full_features = self.high_res_neck(pre_downsample_x, il, full_features)
+
+ # x = self.norm(full_features if (self.return_full_features or self.use_neck) else x)
+        x = self.norm(x)  # newer version: normalize only the final-stage features
+
+ if not self.return_full_features:
+ return x, None
+
+ return x, full_features
+
+ def forward(self, x):
+ x, full_features = self.forward_features(x)
+
+ x = self.avgpool(x)
+ x = torch.flatten(x, 1)
+
+ x = self.head(x)
+ if full_features is not None:
+ return x, full_features
+ return x
+
+ def switch_to_deploy(self):
+        '''
+        A method to perform model self-compression:
+        merges BN into conv layers and converts the MLP relative
+        positional bias into precomputed buffers.
+        '''
+ if not self.switched_to_deploy:
+ for level in [self.patch_embed, self.levels, self.head]:
+ for module in level.modules():
+ if hasattr(module, 'switch_to_deploy'):
+ module.switch_to_deploy()
+ self.switched_to_deploy = True
+
+
+ def change_window_size(self, new_window_size):
+ """
+ E-RADIO employs windowed attention, which may be sensitive to the choice of this parameter,
+ especially in cases of uneven partitioning of the feature maps.
+ E-RADIO allows for the adjustment of the window size after training,
+ making it adaptable to different input image resolutions.
+ The recommended values for window size based on input resolution are as follows:
+
+ Input Resolution | Window Size
+ 224 | 7
+ 256 | 8
+        384 | 12
+ 512 | 16
+ Ideally, the window size should be a factor of the input resolution. In the third stage, we divide the resolution by 16, so the window size should be
+ img_res/16/2
+        for the third stage and img_res/32 for the last stage. Rather than working this out by hand,
+        set it explicitly with model.change_window_size(new_window_size), or call model.set_optimal_window_size(img_res) to derive it from the input resolution.
+ """
+ window_size = new_window_size
+ print(f"Setting window size to {window_size}")
+ for module in self.modules():
+ if hasattr(module, "window_size"):
+ # check if tuple or a number
+ if isinstance(module.window_size, tuple):
+ if module.window_size[0] != window_size:
+ module.window_size = (window_size, window_size)
+ elif isinstance(module.window_size, list):
+ if module.window_size[0] != window_size:
+ module.window_size = [window_size, window_size]
+ else:
+ module.window_size = window_size
+
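+    # Example (hedged): for 512x512 inputs, 512 / 32 = 16, so a window size of 16 keeps the
+    # window partitioning even:
+    #   model.change_window_size(16)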
+
+ def set_optimal_window_size(self, image_dim, max_window_size = 16):
+ """
+ Using hand picked window size for various resolutions.
+
+ E-RADIO employs windowed attention, which may be sensitive to the choice of this parameter,
+ especially in cases of uneven partitioning of the feature maps.
+ E-RADIO allows for the adjustment of the window size after training,
+ making it adaptable to different input image resolutions.
+ The recommended values for window size based on input resolution are as follows:
+
+ Input Resolution | Window Size
+ 224 | 7
+ 256 | 8
+        384 | 12
+ 512 | 16
+ Ideally, the window size should be a factor of the input resolution. In the third stage, we divide the resolution by 16, so the window size should be
+ img_res/16/2
+        for the third stage and img_res/32 for the last stage. This method picks such a window size automatically;
+        to set it manually, use model.change_window_size(new_window_size).
+
+ """
+ # import math
+
+ def divisorGenerator(n):
+ large_divisors = []
+ for i in range(1, int(math.sqrt(n) + 1)):
+ if n % i == 0:
+ yield i
+ if i*i != n:
+ large_divisors.append(n / i)
+ for divisor in reversed(large_divisors):
+ yield divisor
+
+ if isinstance(image_dim, list) or isinstance(image_dim, tuple):
+ image_dim = min(image_dim)
+
+        # windowed attention first appears in the 3rd stage, which runs at img_res // 16;
+        # the subsampled attention path downsamples by another 2x, so we actually need img_res // 32
+        # ideally this should be derived from the model structure, e.g. to handle the case where the subsampled path is removed
+ all_divisors = np.array(list(divisorGenerator(image_dim//32)))
+ new_window_size = int(min(all_divisors[all_divisors <= max_window_size][-1], max_window_size))
+
+ # for image_dim in [128, 224, 256, 384, 512, 768, 1024]:
+ # all_divisors = np.array(list(divisorGenerator(image_dim//32)))
+ # new_window_size = int(min(all_divisors[all_divisors <= max_window_size][-1], max_window_size))
+ # print(f"Setting window size to {new_window_size} for image resolution {image_dim}")
+
+ self.change_window_size(new_window_size = new_window_size)
+
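+    # Example (hedged): self.set_optimal_window_size(512) computes 512 // 32 = 16 and picks
+    # its largest divisor that is <= max_window_size, i.e. a window size of 16.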
+
+@register_model
+def eradio_large_fullres_ws16(pretrained=False, **kwargs):
+ model = ERADIO(
+ depths=[3, 3, 5, 5],
+ num_heads=[2, 4, 8, 16],
+ window_size=[None, None, [16, 16], 16],
+ dim=192,
+ in_dim=64,
+ mlp_ratio=4,
+ drop_path_rate=0.0,
+ sr_ratio=[1, 1, [2, 1], 1],
+ use_swiglu=False,
+ yolo_arch=True,
+ shuffle_down=False,
+ conv_base=True,
+ use_neck=True,
+ full_features_head_dim=1536,
+ neck_start_stage=2,
+ **kwargs,
+ )
+ if pretrained:
+ model.load_state_dict(torch.load(pretrained)["state_dict"])
+ return model
+
+
+@register_model
+def eradio_xxxtiny(pretrained=False, **kwargs):
+ model = ERADIO(
+ depths=[1, 3, 4, 5],
+ num_heads=[2, 4, 8, 16],
+ window_size=[None, None, [16, 16], 16],
+ dim=32,
+ in_dim=32,
+ mlp_ratio=4,
+ drop_path_rate=0.0,
+ sr_ratio=[1, 1, [2, 1], 1],
+ use_swiglu=False,
+ yolo_arch=True,
+ shuffle_down=False,
+ conv_base=True,
+ use_neck=True,
+ full_features_head_dim=256,
+ neck_start_stage=2,
+ **kwargs,
+ )
+ if pretrained:
+ model.load_state_dict(torch.load(pretrained))
+ return model
+
+@register_model
+def eradio_xxxtiny_8x_ws12(pretrained=False, **kwargs):
+ model = ERADIO(depths=[1, 3, 4, 5],
+ num_heads=[2, 4, 8, 16],
+ window_size=[None, None, [12, 12], 12],
+ dim=32,
+ in_dim=32,
+ mlp_ratio=4,
+ drop_path_rate=0.0,
+ sr_ratio=[1, 1, [2, 1], 1],
+ use_swiglu=False,
+ downsample_shuffle=False,
+ yolo_arch=True,
+ shuffle_down=False,
+ cpb_mlp_hidden=64,
+ use_neck=True,
+ full_features_head_dim=256,
+ neck_start_stage=2,
+ conv_groups_ratio = 1,
+ **kwargs)
+ if pretrained:
+ model.load_state_dict(torch.load(pretrained)["state_dict"])
+ return model
+
+
+@register_model
+def eradio_xxxtiny_8x_ws16(pretrained=False, **kwargs):
+ model = ERADIO(depths=[1, 3, 4, 5],
+ num_heads=[2, 4, 8, 16],
+ window_size=[None, None, [16, 16], 16],
+ dim=32,
+ in_dim=32,
+ mlp_ratio=4,
+ drop_path_rate=0.0,
+ sr_ratio=[1, 1, [2, 1], 1],
+ use_swiglu=False,
+ downsample_shuffle=False,
+ yolo_arch=True,
+ shuffle_down=False,
+ cpb_mlp_hidden=64,
+ use_neck=True,
+ full_features_head_dim=256,
+ neck_start_stage=1,
+ conv_groups_ratio = 1,
+ **kwargs)
+ if pretrained:
+ model.load_state_dict(torch.load(pretrained)["state_dict"])
+ return model
+
+@register_model
+def eradio(pretrained=False, **kwargs):
+ return eradio_large_fullres_ws16(pretrained=pretrained, **kwargs)
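+
+
+# Minimal usage sketch (hedged): assumes this module has been imported so that the entry
+# points above are registered with timm; the input size is illustrative.
+#   import timm, torch
+#   model = timm.create_model('eradio', pretrained=False)
+#   model.set_optimal_window_size(512)          # picks window size 16 for 512x512 inputs
+#   logits = model(torch.rand(1, 3, 512, 512))  # (1, num_classes)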
diff --git a/tim/models/nvidia_radio/radio/extra_models.py b/tim/models/nvidia_radio/radio/extra_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9ca9eea88c3b35a68cca124e89e6e58931b4f3d
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/extra_models.py
@@ -0,0 +1,206 @@
+from distutils.version import LooseVersion
+from types import MethodType
+from typing import List, Optional, Tuple, Union
+import warnings
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from timm.models.registry import register_model
+from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+
+from .forward_intermediates import forward_intermediates
+from .input_conditioner import InputConditioner
+
+_has_torch_sdpa = hasattr(F, 'scaled_dot_product_attention')
+
+
+class PaliGemmaWrapper(nn.Module):
+ def __init__(self, vis_model: nn.Module, embed_dim: int):
+ super().__init__()
+
+ self.vis_model = vis_model
+        # stored under a private name: `embed_dim` is exposed via the read-only property below,
+        # and assigning directly to `self.embed_dim` would raise AttributeError
+        self._embed_dim = embed_dim
+
+ @property
+ def patch_size(self):
+ return self.vis_model.embeddings.patch_size
+
+ @property
+ def blocks(self):
+ return self.vis_model.encoder.layers
+
+    @property
+    def embed_dim(self):
+        return self._embed_dim
+
+ def forward(self, x: torch.Tensor):
+ outputs = self.vis_model(
+ x,
+ return_dict=False,
+ interpolate_pos_encoding=True,
+ )
+
+ features = outputs[0].to(torch.float32)
+
+ summary = features.mean(dim=1)
+
+ return summary, features
+
+ def forward_features(self, x: torch.Tensor):
+ return self(x)
+
+
+def _get_paligemma_model(repo: str, embed_dim: int = None, dtype: torch.dtype = torch.bfloat16):
+ from transformers import PaliGemmaForConditionalGeneration, __version__ as tx_version
+
+ if LooseVersion(tx_version) > LooseVersion('4.44.2'):
+        warnings.warn(f'Your transformers version "{tx_version}" is higher than 4.44.2; PaliGemma support may be broken with newer versions.')
+
+ extra_args = dict()
+
+ if dtype is not None:
+ extra_args['torch_dtype'] = dtype
+ rev = str(dtype).split('.')[-1]
+ extra_args['revision'] = rev
+
+ model = PaliGemmaForConditionalGeneration.from_pretrained(repo, **extra_args)
+
+ vis_model = model.vision_tower.vision_model
+
+ vis_model = PaliGemmaWrapper(vis_model, embed_dim)
+
+ return vis_model
+
+@register_model
+def paligemma_896_student(**kwargs):
+ model = _get_paligemma_model('google/paligemma-3b-pt-896', embed_dim=1152, dtype=None)
+
+ return model
+
+
+def dv2_sdpa(self, x: torch.Tensor) -> torch.Tensor:
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+
+ q, k, v = qkv[0], qkv[1], qkv[2]
+ x = F.scaled_dot_product_attention(
+ q, k, v,
+ is_causal=False,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ scale=self.scale,
+ )
+ x = x.transpose(1, 2).reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+def _load_dino_v2(dino_v2_model, cache_dir: Optional[str] = None, pretrained=True, **kwargs):
+ if cache_dir:
+ torch.hub.set_dir(cache_dir)
+ model: nn.Module = torch.hub.load(
+ 'facebookresearch/dinov2',
+ dino_v2_model,
+ pretrained=pretrained,
+ # **kwargs,
+ )
+
+ if _has_torch_sdpa:
+ for n, m in model.named_modules():
+ if n.endswith('.attn'):
+ m.forward = MethodType(dv2_sdpa, m)
+
+ return model
+
+class DinoWrapper(nn.Module):
+ def __init__(self, dino_model: nn.Module):
+ super().__init__()
+
+ self.inner = dino_model
+ dino_model.blocks = nn.Sequential(*dino_model.blocks)
+
+ @property
+ def embed_dim(self):
+ return self.inner.embed_dim
+
+ @property
+ def patch_size(self):
+ return self.inner.patch_size
+
+ @property
+ def num_cls_tokens(self):
+ return getattr(self.inner, 'num_tokens', 1)
+
+ @property
+ def num_registers(self):
+ return getattr(self.inner, 'num_register_tokens', 0)
+
+ @property
+ def num_summary_tokens(self):
+ return self.num_cls_tokens + self.num_registers
+
+ @property
+ def blocks(self):
+ return self.inner.blocks
+
+ def forward(self, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
+ parts = self.inner.forward_features(*args, **kwargs)
+
+ cls_token = parts['x_norm_clstoken']
+ features = parts['x_norm_patchtokens']
+
+ return cls_token, features
+
+ def forward_features(self, x: torch.Tensor):
+ x = self.inner.prepare_tokens_with_masks(x)
+ x = self.inner.blocks(x)
+ x_norm = self.inner.norm(x)
+
+ return x_norm[:, 0], x_norm[:, self.num_summary_tokens:]
+
+ def patchify(self, x: torch.Tensor) -> torch.Tensor:
+ return self.inner.prepare_tokens_with_masks(x)
+
+ def forward_intermediates(self,
+ x: torch.Tensor,
+ norm: bool = False,
+ **kwargs,
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
+ return forward_intermediates(
+ self,
+ patch_extractor=self.inner.prepare_tokens_with_masks,
+ num_summary_tokens=self.num_summary_tokens,
+ num_cls_tokens=self.num_cls_tokens,
+ norm=self.inner.norm if norm else lambda y: y,
+ x=x,
+ **kwargs,
+ )
+
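+# Hedged usage sketch (downloads DINOv2 weights from torch.hub; shapes are illustrative):
+#   backbone = _load_dino_v2('dinov2_vitl14_reg', pretrained=True)
+#   model = DinoWrapper(backbone)
+#   summary, features = model(torch.rand(1, 3, 224, 224))   # CLS token and patch tokens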
+
+def _dino_student(arch: str, **kwargs):
+ from . import dinov2_arch
+
+ factory = getattr(dinov2_arch, arch)
+ model = factory()
+
+ model = DinoWrapper(model)
+
+ conditioner = InputConditioner(
+ input_scale=1.0,
+ norm_mean=IMAGENET_DEFAULT_MEAN,
+ norm_std=IMAGENET_DEFAULT_STD,
+ )
+
+ model.input_conditioner = conditioner
+
+ return model
+
+
+@register_model
+def dino_v2_l_student(**kwargs):
+ return _dino_student('dinov2_vitl14_reg', **kwargs)
+
+@register_model
+def dino_v2_g_student(**kwargs):
+ return _dino_student('dinov2_vitg14_reg', **kwargs)
diff --git a/tim/models/nvidia_radio/radio/extra_timm_models.py b/tim/models/nvidia_radio/radio/extra_timm_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..c912b156c27e68b3e780e21d7d4393e93b8cc425
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/extra_timm_models.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+import math
+import warnings
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from timm.models import register_model
+from timm.models.vision_transformer import (
+ VisionTransformer,
+ _create_vision_transformer as _timm_create_vision_transformer,
+ Mlp,
+ Block,
+ LayerScale as TIMMLayerScale,
+)
+
+# Import these to also register them
+from . import dinov2_arch
+
+
+@register_model
+def vit_tiny_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
+    """ ViT-Tiny (ViT-Ti/14)
+ """
+ model_args = dict(patch_size=14, embed_dim=192, depth=12, num_heads=3)
+ model = _create_vision_transformer('vit_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_small_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
+    """ ViT-Small (ViT-S/14)
+ """
+ model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6)
+ model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_base_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-Base (ViT-B/14) from original paper (https://arxiv.org/abs/2010.11929).
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
+ """
+ model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12)
+ model = _create_vision_transformer('vit_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_base_patch16_v2_224(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
+ """
+ model_args = dict(
+ patch_size=16, embed_dim=768, depth=12, num_heads=12, init_values=1e-5,
+ reg_tokens=4, no_embed_class=True, img_size=518 * 16 // 14
+ )
+ model = _create_vision_transformer(
+ 'vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_large_patch16_v2_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
+ """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
+ """
+ name = 'vit_large_patch14_reg4_dinov2'
+ model_args = dict(
+ patch_size=16, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5,
+ reg_tokens=4, no_embed_class=True, img_size=518 * 16 // 14
+ )
+ model = _create_vision_transformer(name, pretrained=pretrained, **dict(model_args, **kwargs))
+
+ return model
+
+@register_model
+def vit_huge_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-Huge model (ViT-H/16) from original paper (https://arxiv.org/abs/2010.11929).
+ """
+ model_args = dict(patch_size=16, embed_dim=1280, depth=32, num_heads=16)
+ if pretrained:
+ # There is no pretrained version of ViT-H/16, but we can adapt a ViT-H/14 for this purpose
+ model = _create_vision_transformer('vit_huge_patch14_224', pretrained=True, **dict(model_args, **kwargs))
+ else:
+ model = _create_vision_transformer('vit_huge_patch16_224', pretrained=False, **dict(model_args, **kwargs))
+ return model
+
+
+@register_model
+def vit_huge_patch16_224_mlpnorm(pretrained=False, **kwargs) -> VisionTransformer:
+ """ ViT-Huge model (ViT-H/16) from original paper (https://arxiv.org/abs/2010.11929).
+ """
+ model = vit_huge_patch16_224(pretrained=pretrained, **kwargs)
+
+ for m in model.modules():
+ if isinstance(m, Mlp) and not isinstance(m.norm, nn.LayerNorm):
+ m.norm = nn.LayerNorm(m.fc1.out_features)
+
+ return model
+
+
+@register_model
+def vit_giant_patch16_224(pretrained=False, scaled_ln: bool = False, **kwargs) -> VisionTransformer:
+ """ ViT-giant model (ViT-g/16) from original paper (https://arxiv.org/abs/2010.11929).
+ """
+ model_args = dict(patch_size=16, embed_dim=1536, depth=40, num_heads=24)
+ model = _create_vision_transformer('vit_giant_patch16_224', pretrained=False, **dict(model_args, **kwargs))
+ if scaled_ln:
+ _apply_scaled_ln(model)
+ return model
+
+
+@register_model
+def vit_bigG_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
+ model_args = dict(patch_size=14, embed_dim=1664, depth=48, num_heads=16, init_values=1e-6)
+ model = _create_vision_transformer('vit_bigG_patch14', pretrained=False, **dict(model_args, **kwargs))
+ return model
+
+
+def _create_vision_transformer(*args, **kwargs):
+ model = _timm_create_vision_transformer(*args, **kwargs)
+ _patch_layer_scale(model)
+ return model
+
+
+def _patch_layer_scale(model: VisionTransformer):
+ def replace_ls(old_ls: TIMMLayerScale):
+ new_ls = dinov2_arch.LayerScale(old_ls.gamma.shape[0], inplace=old_ls.inplace)
+ new_ls.load_state_dict(old_ls.state_dict())
+ return new_ls
+
+ # Monkey patch: Replace TIMM's LayerScale with our modified DINOv2 one, that uses a param name
+ # other than gamma, so that HFHub doesn't mess with it!
+ for mod in model.modules():
+ if isinstance(mod, Block):
+ if isinstance(mod.ls1, TIMMLayerScale):
+ mod.ls1 = replace_ls(mod.ls1)
+ if isinstance(mod.ls2, TIMMLayerScale):
+ mod.ls2 = replace_ls(mod.ls2)
+ pass
+
+
+class ScaledLayerNorm(nn.LayerNorm):
+ '''
+ https://arxiv.org/pdf/2502.05795v1
+ '''
+ def __init__(self, ln_base: nn.LayerNorm, depth: int = 0):
+ super().__init__(ln_base.normalized_shape, eps=ln_base.eps, elementwise_affine=ln_base.elementwise_affine)
+ self.load_state_dict(ln_base.state_dict())
+ self.register_buffer('ln_scale', torch.tensor(1.0 / math.sqrt(depth)), persistent=False)
+
+ def forward(self, x):
+ y = super().forward(x)
+ y = y * self.ln_scale
+ return y
+
+
+class DyT(nn.Module):
+ def __init__(self, C: int, init_alpha: float):
+ super().__init__()
+ self.alpha = nn.Parameter(torch.full((1,), init_alpha))
+ self.gamma = nn.Parameter(torch.ones(C))
+ self.beta = nn.Parameter(torch.zeros(C))
+
+ def forward(self, x: torch.Tensor):
+ x = F.tanh(self.alpha * x)
+ return self.gamma * x + self.beta
+
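+# Hedged note: DyT is the "dynamic tanh" drop-in for LayerNorm, computing
+# gamma * tanh(alpha * x) + beta elementwise; e.g. DyT(1024, init_alpha=0.9) can stand in
+# for nn.LayerNorm(1024), which is how it is used in vit_large_dyt_patch16_224 below.
+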
+@register_model
+def vit_large_dyt_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
+ """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
+ """
+ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16)
+ model = _create_vision_transformer('vit_large_dyt_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
+
+ def _replace_ln_with_dyt(ln: nn.LayerNorm, depth: int):
+ return DyT(ln.normalized_shape[0], init_alpha=0.9)
+ _replace_ln(model, _replace_ln_with_dyt)
+
+ return model
+
+
+def _apply_scaled_ln(model: VisionTransformer):
+ warnings.warn('Post-LayerNorm scaling activated!')
+
+ _replace_ln(model, lambda ln, depth: ScaledLayerNorm(ln, depth=depth))
+
+def _replace_ln(model: VisionTransformer, fn):
+ def _inner_replace_ln(block: Block, depth: int, key: str):
+ prev = getattr(block, key)
+ if isinstance(prev, nn.LayerNorm):
+ setattr(block, key, fn(prev, depth=depth))
+
+ for i, block in enumerate(model.blocks):
+ _inner_replace_ln(block, i + 1, 'norm1')
+ _inner_replace_ln(block, i + 1, 'norm2')
diff --git a/tim/models/nvidia_radio/radio/feature_normalizer.py b/tim/models/nvidia_radio/radio/feature_normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d4cd2706328bdea36bc9857102545102ef136d7
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/feature_normalizer.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from collections import namedtuple
+from typing import NamedTuple, Optional, Tuple
+import torch
+from torch import nn
+
+
+def _run_kernel(x: torch.Tensor, mean: torch.Tensor, tx: torch.Tensor):
+ if x.ndim <= 3:
+ x = x - mean
+ x = x @ tx.T
+ elif x.ndim == 4:
+ x = x - mean.reshape(1, -1, 1, 1)
+ kernel = tx.reshape(*tx.shape, 1, 1)
+ x = torch.nn.functional.conv2d(x, weight=kernel, bias=None, stride=1, padding=0)
+ else:
+ raise ValueError(f'Unsupported input dimension: {x.ndim}, shape: {x.shape}')
+ return x
+
+
+class FeatureNormalizer(nn.Module):
+ def __init__(self, embed_dim: int, dtype: torch.dtype = torch.float32):
+ super().__init__()
+
+ self.register_buffer('mean', torch.zeros(embed_dim, dtype=dtype))
+ self.register_buffer('tx', torch.eye(embed_dim, dtype=dtype))
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = _run_kernel(x, self.mean, self.tx)
+ return x
+
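+# Hedged shape sketch of what `_run_kernel` computes for the two supported layouts:
+#   tokens (B, N, C)    : y = (x - mean) @ tx.T
+#   maps   (B, C, H, W) : the same affine map, applied as a 1x1 convolution
+# With the default buffers (zero mean, identity tx) the normalizer is a no-op.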
+
+class InterFeatState(NamedTuple):
+ y: torch.Tensor
+ alpha: torch.Tensor
+
+
+class IntermediateFeatureNormalizerBase(nn.Module):
+ def forward(self, x: torch.Tensor, index: int, rot_index: int = None, skip: Optional[int] = None) -> InterFeatState:
+ raise NotImplementedError()
+
+
+class IntermediateFeatureNormalizer(IntermediateFeatureNormalizerBase):
+ def __init__(self, num_intermediates: int, embed_dim: int, rot_per_layer: bool = False, dtype: torch.dtype = torch.float32):
+ super().__init__()
+ self.register_buffer('alphas', torch.ones(num_intermediates, dtype=dtype))
+
+ rot = torch.eye(embed_dim, dtype=dtype)
+ if rot_per_layer:
+ rot = rot.unsqueeze(0).repeat(num_intermediates, 1, 1)
+
+ self.register_buffer('rotation', rot.contiguous())
+ self.register_buffer('means', torch.zeros(num_intermediates, embed_dim, dtype=dtype))
+
+ def forward(self, x: torch.Tensor, index: int, rot_index: int = None, skip: Optional[int] = None) -> InterFeatState:
+ if rot_index is None:
+ rot_index = index
+
+ if skip:
+ assert x.ndim == 3, f'Cannot use the `skip` parameter when the `x` tensor isn\'t 3-dimensional.'
+ prefix, x = x[:, :skip], x[:, skip:]
+
+ rotation = self._get_rotation(rot_index)
+ y = _run_kernel(x, self.means[index], rotation)
+
+ alpha = self.alphas[index]
+ if skip:
+ alpha = torch.cat([
+ torch.ones(skip, dtype=alpha.dtype, device=alpha.device),
+ alpha[None].expand(y.shape[1]),
+ ]).reshape(1, -1, 1)
+ y = torch.cat([prefix, y], dim=1)
+ else:
+ if x.ndim == 3:
+ alpha = alpha.reshape(1, 1, 1).expand(1, y.shape[1], 1)
+ elif x.ndim == 4:
+ alpha = alpha.reshape(1, 1, 1, 1).expand(1, 1, *y.shape[2:])
+ else:
+ raise ValueError(f'Unsupported input dimension: {x.ndim}')
+
+ return InterFeatState(y, alpha)
+
+ def _get_rotation(self, rot_index: int) -> torch.Tensor:
+ if self.rotation.ndim == 2:
+ return self.rotation
+ return self.rotation[rot_index]
+
+
+class NullIntermediateFeatureNormalizer(IntermediateFeatureNormalizerBase):
+ instances = dict()
+
+ def __init__(self, dtype: torch.dtype, device: torch.device):
+ super().__init__()
+ self.register_buffer('alpha', torch.tensor(1, dtype=dtype, device=device))
+
+ @staticmethod
+ def get_instance(dtype: torch.dtype, device: torch.device):
+ instance = NullIntermediateFeatureNormalizer.instances.get((dtype, device), None)
+ if instance is None:
+ instance = NullIntermediateFeatureNormalizer(dtype, device)
+ NullIntermediateFeatureNormalizer.instances[(dtype, device)] = instance
+ return instance
+
+ def forward(self, x: torch.Tensor, index: int, rot_index: int = None, skip: Optional[int] = None) -> InterFeatState:
+ return InterFeatState(x, self.alpha)
diff --git a/tim/models/nvidia_radio/radio/forward_intermediates.py b/tim/models/nvidia_radio/radio/forward_intermediates.py
new file mode 100644
index 0000000000000000000000000000000000000000..364137c8c991ad1eacabfd478d8550d335e2640e
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/forward_intermediates.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from typing import Callable, Dict, List, Optional, Set, Tuple, Union, Any, Iterable
+from types import MethodType
+
+import torch
+from torch import nn
+
+from .feature_normalizer import IntermediateFeatureNormalizerBase, NullIntermediateFeatureNormalizer
+
+
+def _take_indices(
+ num_blocks: int,
+ n: Optional[Union[int, List[int], Tuple[int]]],
+) -> Tuple[Set[int], int]:
+ if isinstance(n, int):
+ assert n >= 0
+ take_indices = {x for x in range(num_blocks - n, num_blocks)}
+ else:
+ take_indices = {num_blocks + idx if idx < 0 else idx for idx in n}
+ return take_indices, max(take_indices)
+
+
+def forward_intermediates(
+ model: nn.Module,
+ patch_extractor: Callable[[torch.Tensor], torch.Tensor],
+ norm: nn.Module,
+ num_summary_tokens: int,
+ num_cls_tokens: int,
+ x: torch.Tensor,
+ indices: Optional[Union[int, List[int], Tuple[int]]] = None,
+ return_prefix_tokens: bool = False,
+ stop_early: bool = False,
+ output_fmt: str = 'NCHW',
+ intermediates_only: bool = False,
+ aggregation: Optional[str] = "sparse",
+ inter_feature_normalizer: Optional[IntermediateFeatureNormalizerBase] = None,
+ norm_alpha_scheme = "post-alpha",
+ block_kwargs: Dict = None,
+) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
+ """ Forward features that returns intermediates.
+
+ The Dense layer aggregation method is inspired from the paper: "Dense Connector for MLLMs"
+    by Yao, Huanjin et al. (2024), arXiv preprint arXiv:2405.13800.
+
+ Args:
+ x: Input image tensor
+ indices: Take last n blocks if int, select matching indices if sequence
+ return_prefix_tokens: Return both prefix and spatial intermediate tokens
+ norm: Apply norm layer to all intermediates
+ stop_early: Stop iterating over blocks when last desired intermediate hit
+ output_fmt: Shape of intermediate feature outputs
+ intermediates_only: Only return intermediate features
+ aggregation: intermediate layer aggregation method (sparse or dense)
+ norm_alpha_scheme: apply alpha before ("pre-alpha") or after accumulation ("post-alpha")
+    Returns:
+        A list of intermediate features if `intermediates_only` is True, otherwise a tuple
+        of (final features, list of intermediate features).
+    """
+ assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
+ assert aggregation in ('sparse', 'dense'), 'Aggregation must be one of sparse or dense.'
+ reshape = output_fmt == 'NCHW'
+ intermediates = []
+
+ block_kwargs = block_kwargs or dict()
+
+ blocks = model.blocks
+
+ take_indices, max_index = _take_indices(len(blocks), indices)
+ take_indices = sorted(take_indices)
+ # forward pass
+ B, _, height, width = x.shape
+
+ x = patch_extractor(x)
+
+ if stop_early:
+ blocks = blocks[:max_index + 1]
+
+ if inter_feature_normalizer is None or norm_alpha_scheme == 'none':
+ inter_feature_normalizer = NullIntermediateFeatureNormalizer.get_instance(x.dtype, x.device)
+
+ assert norm_alpha_scheme in ('none', 'pre-alpha', 'post-alpha'), f'Unsupported alpha scheme: {norm_alpha_scheme}'
+ post_alpha_scheme = norm_alpha_scheme == 'post-alpha'
+
+ accumulator = 0
+ alpha_sum = 0
+ num_accumulated = 0
+
+ take_off = 0
+
+ for i, blk in enumerate(blocks):
+ x = blk(x, **block_kwargs)
+ if aggregation == "dense":
+ # Arbitrarily use the rotation matrix from the final layer in the dense group
+ y, alpha = inter_feature_normalizer(x, i, rot_index=take_indices[take_off], skip=num_summary_tokens)
+ if post_alpha_scheme:
+ accumulator = accumulator + y
+ alpha_sum = alpha_sum + alpha
+ else:
+ accumulator = accumulator + (alpha * y)
+ alpha_sum += 1
+ num_accumulated += 1
+ if i == take_indices[take_off]:
+ if aggregation == "dense":
+ alpha = alpha_sum / num_accumulated
+ x_ = alpha * accumulator / num_accumulated
+ num_accumulated = 0
+ accumulator = 0
+ alpha_sum = 0
+ else:
+ y, alpha = inter_feature_normalizer(x, i, skip=num_summary_tokens)
+ x_ = alpha * y
+ # normalize intermediates with final norm layer if enabled
+ intermediates.append(norm(x_))
+ take_off = min(take_off + 1, len(take_indices) - 1)
+
+ # process intermediates
+
+ # split prefix (e.g. class, distill) and spatial feature tokens
+ prefix_tokens = [y[:, :num_cls_tokens] for y in intermediates]
+ intermediates = [y[:, num_summary_tokens:] for y in intermediates]
+
+ if reshape:
+ # reshape to BCHW output format
+ H = height // model.patch_size
+ W = width // model.patch_size
+ intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
+ if not torch.jit.is_scripting() and return_prefix_tokens:
+        # return_prefix not supported in torchscript due to poor type handling
+ intermediates = list(zip(prefix_tokens, intermediates))
+ if intermediates_only:
+ return intermediates
+ x = norm(x)
+ return x, intermediates
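+
+
+# Hedged usage sketch (the wrapped `model` and `images` are illustrative; argument names
+# follow the signature above):
+#   final, inters = forward_intermediates(
+#       model,
+#       patch_extractor=model.patchify,
+#       norm=nn.Identity(),
+#       num_summary_tokens=model.num_summary_tokens,
+#       num_cls_tokens=model.num_cls_tokens,
+#       x=images,
+#       indices=4,               # take the last 4 blocks
+#       output_fmt='NCHW')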
diff --git a/tim/models/nvidia_radio/radio/hf_model.py b/tim/models/nvidia_radio/radio/hf_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..27bf97bf448be54ca098e857be940685d3b618d6
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/hf_model.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import namedtuple
+from typing import Callable, Dict, Optional, List, Union
+
+from timm.models import VisionTransformer
+import torch
+from torch import nn
+from transformers import PretrainedConfig, PreTrainedModel
+
+
+from .common import RESOURCE_MAP, DEFAULT_VERSION
+
+# Import all required modules.
+from .adaptor_base import AdaptorBase, RadioOutput, AdaptorInput
+from .adaptor_generic import GenericAdaptor, AdaptorBase
+from .adaptor_mlp import create_mlp_from_config
+from .adaptor_registry import adaptor_registry
+from .cls_token import ClsToken
+from .dinov2_arch import dinov2_vitg14_reg
+from .enable_cpe_support import enable_cpe
+from .enable_spectral_reparam import configure_spectral_reparam_from_args
+from .eradio_model import eradio
+from .feature_normalizer import FeatureNormalizer, IntermediateFeatureNormalizer
+from .forward_intermediates import forward_intermediates
+from .radio_model import create_model_from_args
+from .radio_model import RADIOModel as RADIOModelBase, Resolution
+from .input_conditioner import get_default_conditioner, InputConditioner
+from .open_clip_adaptor import OpenCLIP_RADIO
+from .vit_patch_generator import ViTPatchGenerator
+from .vitdet import apply_vitdet_arch, VitDetArgs
+
+# Register extra models
+from .extra_timm_models import *
+from .extra_models import *
+
+
+class RADIOConfig(PretrainedConfig):
+ """Pretrained Hugging Face configuration for RADIO models."""
+
+ def __init__(
+ self,
+ args: Optional[dict] = None,
+ version: Optional[str] = DEFAULT_VERSION,
+ patch_size: Optional[int] = None,
+ max_resolution: Optional[int] = None,
+ preferred_resolution: Optional[Resolution] = None,
+ adaptor_names: Union[str, List[str]] = None,
+ adaptor_configs: Dict[str, Dict[str, int]] = None,
+ vitdet_window_size: Optional[int] = None,
+ feature_normalizer_config: Optional[dict] = None,
+ inter_feature_normalizer_config: Optional[dict] = None,
+ **kwargs,
+ ):
+ self.args = args
+ for field in ["dtype", "amp_dtype"]:
+ if self.args is not None and field in self.args:
+ # Convert to a string in order to make it serializable.
+ # For example for torch.float32 we will store "float32",
+ # for "bfloat16" we will store "bfloat16".
+ self.args[field] = str(args[field]).split(".")[-1]
+ self.version = version
+ resource = RESOURCE_MAP[version]
+ self.patch_size = patch_size or resource.patch_size
+ self.max_resolution = max_resolution or resource.max_resolution
+ self.preferred_resolution = (
+ preferred_resolution or resource.preferred_resolution
+ )
+ self.adaptor_names = adaptor_names
+ self.adaptor_configs = adaptor_configs
+ self.vitdet_window_size = vitdet_window_size
+ self.feature_normalizer_config = feature_normalizer_config
+ self.inter_feature_normalizer_config = inter_feature_normalizer_config
+ super().__init__(**kwargs)
+
+
+
+class RADIOModel(PreTrainedModel):
+ """Pretrained Hugging Face model for RADIO.
+
+ This class inherits from PreTrainedModel, which provides
+ HuggingFace's functionality for loading and saving models.
+ """
+
+ config_class = RADIOConfig
+
+ def __init__(self, config: RADIOConfig):
+ super().__init__(config)
+
+ RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
+ args = RADIOArgs(**config.args)
+ self.config = config
+
+ model = create_model_from_args(args)
+ input_conditioner: InputConditioner = get_default_conditioner()
+
+ dtype = getattr(args, "dtype", torch.float32)
+ if isinstance(dtype, str):
+ # Convert the dtype's string representation back to a dtype.
+ dtype = getattr(torch, dtype)
+ model.to(dtype=dtype)
+ input_conditioner.dtype = dtype
+
+ summary_idxs = torch.tensor(
+ [i for i, t in enumerate(args.teachers) if t.get("use_summary", True)],
+ dtype=torch.int64,
+ )
+
+ adaptor_configs = config.adaptor_configs
+ adaptor_names = config.adaptor_names or []
+
+ adaptors = dict()
+ for adaptor_name in adaptor_names:
+ mlp_config = adaptor_configs[adaptor_name]
+ adaptor = GenericAdaptor(args, None, None, mlp_config)
+ adaptor.head_idx = mlp_config["head_idx"]
+ adaptors[adaptor_name] = adaptor
+
+ feature_normalizer = None
+ if config.feature_normalizer_config is not None:
+ # Actual normalization values will be restored when loading checkpoint weights.
+ feature_normalizer = FeatureNormalizer(config.feature_normalizer_config["embed_dim"])
+
+ inter_feature_normalizer = None
+ if config.inter_feature_normalizer_config is not None:
+ inter_feature_normalizer = IntermediateFeatureNormalizer(
+ config.inter_feature_normalizer_config["num_intermediates"],
+ config.inter_feature_normalizer_config["embed_dim"],
+ rot_per_layer=config.inter_feature_normalizer_config["rot_per_layer"],
+ dtype=dtype)
+
+ self.radio_model = RADIOModelBase(
+ model,
+ input_conditioner,
+ summary_idxs=summary_idxs,
+ patch_size=config.patch_size,
+ max_resolution=config.max_resolution,
+ window_size=config.vitdet_window_size,
+ preferred_resolution=config.preferred_resolution,
+ adaptors=adaptors,
+ feature_normalizer=feature_normalizer,
+ inter_feature_normalizer=inter_feature_normalizer,
+ )
+
+ @property
+ def adaptors(self) -> nn.ModuleDict:
+ return self.radio_model.adaptors
+
+ @property
+ def model(self) -> VisionTransformer:
+ return self.radio_model.model
+
+ @property
+ def input_conditioner(self) -> InputConditioner:
+ return self.radio_model.input_conditioner
+
+ @property
+ def num_summary_tokens(self) -> int:
+ return self.radio_model.num_summary_tokens
+
+ @property
+ def patch_size(self) -> int:
+ return self.radio_model.patch_size
+
+ @property
+ def max_resolution(self) -> int:
+ return self.radio_model.max_resolution
+
+ @property
+ def preferred_resolution(self) -> Resolution:
+ return self.radio_model.preferred_resolution
+
+ @property
+ def window_size(self) -> int:
+ return self.radio_model.window_size
+
+ @property
+ def min_resolution_step(self) -> int:
+ return self.radio_model.min_resolution_step
+
+ def make_preprocessor_external(self) -> Callable[[torch.Tensor], torch.Tensor]:
+ return self.radio_model.make_preprocessor_external()
+
+ def get_nearest_supported_resolution(self, height: int, width: int) -> Resolution:
+ return self.radio_model.get_nearest_supported_resolution(height, width)
+
+ def switch_to_deploy(self):
+ return self.radio_model.switch_to_deploy()
+
+ def forward(self, x: torch.Tensor):
+ return self.radio_model.forward(x)
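+
+
+# Hedged usage sketch (the repository id is a placeholder, not taken from this codebase):
+#   model = RADIOModel.from_pretrained("<org>/<radio-checkpoint>")
+#   output = model(images)   # delegates to RADIOModelBase.forward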
diff --git a/tim/models/nvidia_radio/radio/input_conditioner.py b/tim/models/nvidia_radio/radio/input_conditioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..63ad07a0c95ee1f45e0a977f59de5252d16e68ef
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/input_conditioner.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+from typing import Union, Tuple
+
+import torch
+from torch import nn
+
+
+norm_t = Union[Tuple[float, float, float], torch.Tensor]
+
+class InputConditioner(nn.Module):
+ def __init__(self,
+ input_scale: float,
+ norm_mean: norm_t,
+ norm_std: norm_t,
+ dtype: torch.dtype = None,
+ ):
+ super().__init__()
+
+ self.dtype = dtype
+
+ self.register_buffer("norm_mean", _to_tensor(norm_mean) / input_scale)
+ self.register_buffer("norm_std", _to_tensor(norm_std) / input_scale)
+
+ def forward(self, x: torch.Tensor):
+ y = (x - self.norm_mean) / self.norm_std
+ if self.dtype is not None:
+ y = y.to(self.dtype)
+ return y
+
+
+def get_default_conditioner():
+ from timm.data.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
+
+ return InputConditioner(
+ input_scale=1.0,
+ norm_mean=OPENAI_CLIP_MEAN,
+ norm_std=OPENAI_CLIP_STD,
+ )
+
+
+def _to_tensor(v: norm_t):
+ return torch.as_tensor(v, dtype=torch.float32).view(-1, 1, 1)
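
A small sketch of the conditioner above in isolation: with `input_scale=1.0` it applies the OpenAI CLIP mean/std normalization channel-wise to an image in `[0, 1]` (shapes below are illustrative).

```python
import torch

conditioner = get_default_conditioner()            # defined above; uses OPENAI_CLIP_MEAN/STD

img = torch.rand(2, 3, 224, 224)                   # RGB batch in [0, 1]
normed = conditioner(img)                          # (img - mean) / std, broadcast over (C, 1, 1)

# First-channel check (CLIP mean/std for R are ~0.4815 / ~0.2686):
assert torch.allclose(normed[:, 0], (img[:, 0] - 0.48145466) / 0.26862954, atol=1e-5)
```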
diff --git a/tim/models/nvidia_radio/radio/open_clip_adaptor.py b/tim/models/nvidia_radio/radio/open_clip_adaptor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef8e80c821e055c7f6bcb6696cf0138bd0bd2054
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/open_clip_adaptor.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from argparse import Namespace
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from .adaptor_registry import adaptor_registry, dict_t, state_t
+
+from .adaptor_generic import GenericAdaptor
+
+
+class OpenCLIP_RADIO(GenericAdaptor):
+ def __init__(self, main_config: Namespace, adaptor_config: dict_t, state: state_t):
+ super().__init__(main_config, adaptor_config, state)
+
+ import open_clip
+
+ self.oc_model = open_clip.create_model_from_pretrained(
+ model_name=adaptor_config['model'],
+ pretrained=adaptor_config['pretrained'],
+ return_transform=False,
+ )
+ # Unload the visual tower; only the text encoder is needed for this adaptor
+ self.oc_model.visual = None
+
+ self.tokenizer = open_clip.get_tokenizer(model_name=adaptor_config['model'])
+
+ def encode_text(self, text, normalize: bool = False):
+ return self.oc_model.encode_text(text, normalize=normalize)
+
+
+@adaptor_registry.register_adaptor("open_clip")
+def create_open_clip_adaptor(main_config: Namespace, adaptor_config: dict_t, state: state_t):
+ return OpenCLIP_RADIO(main_config, adaptor_config, state)
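
The registration pattern above generalizes: any adaptor can be exposed through `adaptor_registry` the same way. A hypothetical sketch follows (the teacher name `my_teacher` and the reuse of `GenericAdaptor` are illustrative assumptions, not part of the upstream registry).

```python
from argparse import Namespace

@adaptor_registry.register_adaptor("my_teacher")
def create_my_teacher_adaptor(main_config: Namespace, adaptor_config: dict_t, state: state_t):
    # Reuse the generic MLP adaptor; a real teacher would typically add its own heads.
    return GenericAdaptor(main_config, adaptor_config, state)
```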
diff --git a/tim/models/nvidia_radio/radio/radio_model.py b/tim/models/nvidia_radio/radio/radio_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1691dbeb2b2bc2d28724c63a8f4b345d8aef69f
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/radio_model.py
@@ -0,0 +1,375 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+from typing import Callable, Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from timm.models import create_model, VisionTransformer
+from types import MethodType
+
+from .enable_cpe_support import enable_cpe
+from .input_conditioner import InputConditioner
+from .adaptor_base import AdaptorBase, RadioOutput, AdaptorInput
+from . import eradio_model
+from .enable_spectral_reparam import configure_spectral_reparam_from_args
+from .feature_normalizer import FeatureNormalizer, IntermediateFeatureNormalizer
+from . import dual_hybrid_vit
+
+
+class Resolution(NamedTuple):
+ height: int
+ width: int
+
+
+class RADIOModel(nn.Module):
+ def __init__(
+ self,
+ model: nn.Module,
+ input_conditioner: InputConditioner,
+ patch_size: int,
+ max_resolution: int,
+ preferred_resolution: Resolution,
+ summary_idxs: Optional[torch.Tensor] = None,
+ window_size: int = None,
+ adaptors: Dict[str, AdaptorBase] = None,
+ feature_normalizer: Optional[FeatureNormalizer] = None,
+ inter_feature_normalizer: Optional[IntermediateFeatureNormalizer] = None,
+ ):
+ super().__init__()
+
+ self.model = model
+ self.input_conditioner = input_conditioner
+ if summary_idxs is not None:
+ self.register_buffer('summary_idxs', summary_idxs)
+ else:
+ self.summary_idxs = None
+
+ self._preferred_resolution = preferred_resolution
+ self._patch_size = patch_size
+ self._max_resolution = max_resolution
+ self._window_size = window_size
+
+ adaptors = adaptors or dict()
+ self.adaptors = nn.ModuleDict(adaptors)
+
+ if feature_normalizer is None:
+ feature_normalizer = nn.Identity()
+ self.feature_normalizer = feature_normalizer
+ self.inter_feature_normalizer = inter_feature_normalizer
+
+ @property
+ def num_summary_tokens(self) -> int:
+ if hasattr(self.model, 'num_summary_tokens'):
+ return self.model.num_summary_tokens
+
+ patch_gen = getattr(self.model, "patch_generator", None)
+ if patch_gen is not None:
+ return patch_gen.num_skip
+ elif getattr(self.model, 'global_pool', None) == 'avg':
+ return 0
+ return 1
+
+ @property
+ def num_cls_tokens(self) -> int:
+ if hasattr(self.model, 'num_cls_tokens'):
+ return self.model.num_cls_tokens
+
+ patch_gen = getattr(self.model, 'patch_generator', None)
+ if patch_gen is not None:
+ return patch_gen.num_cls_tokens
+ elif getattr(self.model, 'global_pool', None) == 'avg':
+ return 0
+ return 1
+
+ @property
+ def patch_size(self) -> int:
+ if self._patch_size is not None:
+ return self._patch_size
+ if hasattr(self.model, "patch_size"):
+ return self.model.patch_size
+ patch_gen = getattr(self.model, "patch_generator", None)
+ if patch_gen is not None:
+ return patch_gen.patch_size
+ return None
+
+ @property
+ def max_resolution(self) -> int:
+ return self._max_resolution
+
+ @property
+ def preferred_resolution(self) -> Resolution:
+ return self._preferred_resolution
+
+ @property
+ def window_size(self) -> int:
+ return self._window_size
+
+ @property
+ def min_resolution_step(self) -> int:
+ res = self.patch_size
+ if self.window_size is not None:
+ res *= self.window_size
+ return res
+
+ @property
+ def blocks(self) -> Iterable[nn.Module]:
+ blocks = getattr(self.model, 'blocks', None)
+ if blocks is not None:
+ return blocks
+ return None
+
+ @property
+ def embed_dim(self) -> int:
+ return self.model.embed_dim
+
+ def make_preprocessor_external(self) -> Callable[[torch.Tensor], torch.Tensor]:
+ ret = self.input_conditioner
+ self.input_conditioner = nn.Identity()
+ return ret
+
+ def get_nearest_supported_resolution(self, height: int, width: int) -> Resolution:
+ height = int(round(height / self.min_resolution_step) * self.min_resolution_step)
+ width = int(round(width / self.min_resolution_step) * self.min_resolution_step)
+
+ height = max(height, self.min_resolution_step)
+ width = max(width, self.min_resolution_step)
+
+ return Resolution(height=height, width=width)
+
+ def switch_to_deploy(self):
+ fn = getattr(self.model, 'switch_to_deploy', None)
+ if fn is not None:
+ fn()
+
+ def forward(self, x: torch.Tensor, feature_fmt: str = 'NLC') -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+ '''
+ Forward process for model.
+ Args:
+ x: Input tensor. Unless `make_preprocessor_external` has been called, the dynamic range of `x` is expected to be `[0, 1]`;
+ otherwise `x` is expected to be mean centered with unit standard deviation.
+ feature_fmt: ['NLC', 'NCHW'] - The output format for the features.
+ '''
+ res_step = self.min_resolution_step
+ if res_step is not None and (x.shape[-2] % res_step != 0 or x.shape[-1] % res_step != 0):
+ raise ValueError('The input resolution must be a multiple of `self.min_resolution_step`. '
+ '`self.get_nearest_supported_resolution(height, width)` is provided as a convenience API. '
+ f'Input: {x.shape[-2:]}, Nearest: {self.get_nearest_supported_resolution(*x.shape[-2:])}')
+
+ x = self.input_conditioner(x)
+ y = self.model.forward_features(x)
+ ret = self._extract_final(x, y, feature_fmt=feature_fmt)
+ return ret
+
+ def forward_pack(self, x: List[torch.Tensor], feature_fmt: str = 'NLC') -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+ '''
+ Forward process for a packed list of variable-resolution inputs.
+ Args:
+ x: List of input tensors. Unless `make_preprocessor_external` has been called, the dynamic range of each tensor is expected to be `[0, 1]`;
+ otherwise each tensor is expected to be mean centered with unit standard deviation.
+ feature_fmt: ['NLC', 'NCHW'] - The output format for the features.
+ '''
+ res_step = self.min_resolution_step
+ for _x in x:
+ if res_step is not None and (_x.shape[-2] % res_step != 0 or _x.shape[-1] % res_step != 0):
+ raise ValueError('The input resolution must be a multiple of `self.min_resolution_step`. '
+ '`self.get_nearest_supported_resolution(height, width)` is provided as a convenience API. '
+ f'Input: {_x.shape[-2:]}, Nearest: {self.get_nearest_supported_resolution(*_x.shape[-2:])}')
+
+ x = [self.input_conditioner(_x) for _x in x]
+ y, cu_seqlens = self.model.forward_features(x)
+ all_summary, spatial_features = [], []
+ num_cls_tokens = self.model.patch_generator.num_cls_tokens
+ num_skip = self.model.patch_generator.num_skip
+ for i in range(len(cu_seqlens)-1):
+ summary = y[cu_seqlens[i]: cu_seqlens[i+1]][: num_cls_tokens]
+ all_feat = y[cu_seqlens[i]: cu_seqlens[i+1]][num_skip :]
+ all_summary.append(summary)
+ spatial_features.append(all_feat)
+ all_summary = torch.cat(all_summary)
+ spatial_features = torch.cat(spatial_features)
+ return all_summary, spatial_features
+
+ def _extract_final(self, x: torch.Tensor, y: torch.Tensor, feature_fmt: str = 'NLC'):
+ if isinstance(self.model, VisionTransformer):
+ patch_gen = getattr(self.model, "patch_generator", None)
+ if patch_gen is not None:
+ all_summary = y[:, : patch_gen.num_cls_tokens]
+ if self.summary_idxs is not None:
+ bb_summary = all_summary[:, self.summary_idxs]
+ else:
+ bb_summary = all_summary
+ all_feat = y[:, patch_gen.num_skip :]
+ elif self.model.global_pool == "avg":
+ all_summary = y[:, self.model.num_prefix_tokens :].mean(dim=1)
+ bb_summary = all_summary
+ all_feat = y
+ else:
+ all_summary = y[:, 0]
+ bb_summary = all_summary
+ all_feat = y[:, 1:]
+ elif isinstance(self.model, eradio_model.ERADIO):
+ _, f = y
+ all_feat = f.flatten(2).transpose(1, 2)
+ all_summary = all_feat.mean(dim=1)
+ bb_summary = all_summary
+ elif isinstance(y, (list, tuple)):
+ all_summary, all_feat = y
+ bb_summary = all_summary
+ else:
+ all_summary = y[:, :self.num_cls_tokens]
+ if self.summary_idxs is not None and all_summary.shape[1] > 1:
+ bb_summary = all_summary[:, self.summary_idxs]
+ else:
+ bb_summary = all_summary
+ all_feat = y[:, self.num_summary_tokens:]
+
+ all_feat = self.feature_normalizer(all_feat)
+
+ if feature_fmt == 'NCHW':
+ fmt_feat = (all_feat.reshape(all_feat.shape[0], x.shape[-2] // self.patch_size, x.shape[-1] // self.patch_size, all_feat.shape[2])
+ .permute(0, 3, 1, 2)
+ )
+ elif feature_fmt == 'NLC':
+ fmt_feat = all_feat
+ else:
+ raise ValueError(f'Unsupported feature_fmt: {feature_fmt}. Must be one of ["NLC", "NCHW"]')
+
+ ret = RadioOutput(bb_summary.flatten(1), fmt_feat)
+
+ if self.adaptors:
+ ret = dict(backbone=ret)
+ for name, adaptor in self.adaptors.items():
+ if all_summary.ndim == 3:
+ if all_summary.shape[1] == 1:
+ summary = all_summary[:, 0]
+ else:
+ summary = all_summary[:, adaptor.head_idx]
+ else:
+ summary = all_summary
+ ada_input = AdaptorInput(images=x, summary=summary.float(), features=all_feat, feature_fmt=feature_fmt, patch_size=self.patch_size)
+ v = adaptor(ada_input).to(torch.float32)
+ ret[name] = v
+
+ return ret
+
+ def forward_intermediates(
+ self,
+ x: torch.Tensor,
+ indices: Optional[Union[int, List[int], Tuple[int]]] = None,
+ return_prefix_tokens: bool = False,
+ norm: bool = False,
+ stop_early: bool = False,
+ output_fmt: str = 'NCHW',
+ intermediates_only: bool = False,
+ aggregation: Optional[str] = "sparse",
+ norm_alpha_scheme: Optional[str] = "post-alpha",
+ ) -> List[RadioOutput]:
+ """ Forward features that returns intermediates.
+ Args:
+ x: Input image tensor
+ indices: Take last n blocks if int, select matching indices if sequence
+ return_prefix_tokens: Return both prefix and spatial intermediate tokens
+ norm: Apply norm layer to all intermediates
+ stop_early: Stop iterating over blocks when last desired intermediate hit
+ output_fmt: Shape of intermediate feature outputs. Options: NCHW, NLC
+ intermediates_only: Only return intermediate features
+ aggregation: intermediate layer aggregation method (sparse or dense).
+ Dense accumulation is done by averaging the features in each group.
+ norm_alpha_scheme: apply alpha before ("pre-alpha") or after accumulation ("post-alpha"), or don't normalize ("none")
+ Only affects dense aggregation
+ Returns:
+ List of RadioOutput objects.
+ """
+ x = self.input_conditioner(x)
+ intermediates = self.model.forward_intermediates(
+ x,
+ indices=indices,
+ return_prefix_tokens=return_prefix_tokens,
+ norm=norm,
+ stop_early=stop_early,
+ output_fmt=output_fmt,
+ intermediates_only=intermediates_only,
+ aggregation=aggregation,
+ inter_feature_normalizer=self.inter_feature_normalizer,
+ norm_alpha_scheme=norm_alpha_scheme,
+ )
+
+ if not intermediates_only:
+ final, intermediates = intermediates
+
+ def prepare_summary(summ: Optional[torch.Tensor]):
+ if summ is None:
+ return summ
+ if self.summary_idxs is not None and summ.shape[1] > 1:
+ summ = summ[:, self.summary_idxs]
+ return summ.flatten(1)
+
+ if return_prefix_tokens:
+ radio_outputs = [
+ RadioOutput(prepare_summary(summary), features)
+ for summary, features in intermediates
+ ]
+ else:
+ radio_outputs = intermediates
+
+ if intermediates_only:
+ return radio_outputs
+ else:
+ final = self._extract_final(x, final, feature_fmt=output_fmt)
+ return final, radio_outputs
+
+
+
+def create_model_from_args(args) -> nn.Module:
+ in_chans = 3
+ if args.in_chans is not None:
+ in_chans = args.in_chans
+ elif args.input_size is not None:
+ in_chans = args.input_size[0]
+
+ # Skip weight initialization unless it's explicitly requested.
+ weight_init = args.model_kwargs.pop("weight_init", "skip")
+
+ model = create_model(
+ args.model,
+ pretrained=args.pretrained,
+ in_chans=in_chans,
+ num_classes=args.num_classes,
+ drop_rate=args.drop,
+ drop_path_rate=args.drop_path,
+ drop_block_rate=args.drop_block,
+ global_pool=args.gp,
+ bn_momentum=args.bn_momentum,
+ bn_eps=args.bn_eps,
+ scriptable=args.torchscript,
+ checkpoint_path=args.initial_checkpoint,
+ weight_init=weight_init,
+ **args.model_kwargs,
+ )
+
+ if hasattr(model, 'norm') and not getattr(args, 'model_norm', False):
+ model.norm = nn.Identity()
+
+ model.head = nn.Identity()
+
+ if args.cpe_max_size is not None:
+ uq_teachers = set(t['name'] for t in args.teachers)
+ enable_cpe(
+ model,
+ args.cpe_max_size,
+ num_cls_tokens=len(uq_teachers) if args.cls_token_per_teacher else 1,
+ register_multiple=getattr(args, 'register_multiple', None),
+ num_registers=getattr(args, 'cpe_num_registers', None),
+ support_packing=args.support_packing,
+ )
+
+ return model
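
As a concrete example of the resolution snapping performed by `get_nearest_supported_resolution` above: `min_resolution_step` is `patch_size` (times `window_size` when ViTDet windowing is active), and inputs are rounded to the nearest multiple of that step, never below one step. A standalone sketch of the same arithmetic:

```python
from typing import Optional, Tuple

def nearest_supported(height: int, width: int, patch_size: int = 16,
                      window_size: Optional[int] = None) -> Tuple[int, int]:
    # Mirrors RADIOModel.min_resolution_step / get_nearest_supported_resolution above.
    step = patch_size * (window_size or 1)
    snap = lambda v: max(int(round(v / step) * step), step)
    return snap(height), snap(width)

print(nearest_supported(600, 800))                  # (608, 800) with a 16-pixel step
print(nearest_supported(600, 800, window_size=8))   # (640, 768) with a 128-pixel step
```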
diff --git a/tim/models/nvidia_radio/radio/vision_transformer_xpos.py b/tim/models/nvidia_radio/radio/vision_transformer_xpos.py
new file mode 100644
index 0000000000000000000000000000000000000000..245e5aed8f769e3b18401da7bead82a0f82df8d3
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/vision_transformer_xpos.py
@@ -0,0 +1,357 @@
+import math
+from typing import Final, List, Optional, Tuple, Union
+
+
+from einops import rearrange
+from timm.models import register_model
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.init import xavier_normal_, xavier_uniform_, zeros_
+
+from .forward_intermediates import forward_intermediates
+
+
+def _get_init_scale(num_encoder_layers: int, num_decoder_layers: int, is_encoder: bool):
+ if num_encoder_layers > 0 and num_decoder_layers == 0:
+ return math.sqrt(math.log(2 * num_encoder_layers))
+ if num_decoder_layers > 0 and num_encoder_layers == 0:
+ return math.sqrt(math.log(2 * num_decoder_layers))
+ if is_encoder:
+ # Both encoders and decoders
+ return math.sqrt(
+ 0.33 * math.log(3 * num_decoder_layers) * math.log(2 * num_encoder_layers)
+ )
+
+ return math.sqrt(math.log(3 * num_decoder_layers))
+
+
+# [1,2] [1,1,2,2]
+# [3,4] -> [3,3,4,4]
+# [5,6] [5,5,6,6]
+def duplicate_interleave(m):
+ return m.view(-1, 1).repeat(1, 2).view(m.shape[0], -1)
+
+# 0,1,2,3,4,5,6,7 -> -1,0,-3,2,-5,4,-7,6
+def rotate_every_two(x):
+ x1 = x[:, :, ::2]
+ x2 = x[:, :, 1::2]
+ x = torch.stack((-x2, x1), dim=-1)
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+class XPosEmbedding2D(torch.nn.Module):
+ """Implementation of xPos based on RotaryEmbedding from GPT-NeoX.
+ This implementation is designed to operate on queries and keys that are compatible with
+ [batch_size, n_heads_per_partition, seq_len, head_dim] (e.g. MinGPTAttention format).
+ """
+
+ def __init__(
+ self,
+ head_dim: int,
+ base=50000,
+ scale_base=512
+ ):
+ super().__init__()
+ half_dim = head_dim // 2
+ self.half_dim = half_dim
+ inv_freq = 1.0 / (base ** (torch.arange(0, half_dim, 2).float() / half_dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.head_dim = head_dim
+ self.token_shape_cached = None
+ self.batch_size_cached = None
+ self.cos_cached: torch.Tensor | None = None
+ self.sin_cached: torch.Tensor | None = None
+ self.scale_cached: torch.Tensor | None = None
+ self.scale_base = scale_base
+ self.register_buffer("scale",
+ (torch.arange(0, half_dim, 2) + 0.4 * half_dim) / (1.4 * half_dim))
+
+ def cos_sin(
+ self,
+ token_shape: Tuple[int, int],
+ device="cuda",
+ dtype=torch.bfloat16,
+ ) -> torch.Tensor:
+ if token_shape != self.token_shape_cached:
+ self.token_shape_cached = token_shape
+ y = torch.arange(token_shape[0], device=device, dtype=self.inv_freq.dtype)
+ x = torch.arange(token_shape[1], device=device, dtype=self.inv_freq.dtype)
+ x, y = torch.meshgrid(x, y, indexing='xy')
+
+ y_freqs = torch.einsum("i,j->ij", y.flatten(), self.inv_freq)
+ x_freqs = torch.einsum("i,j->ij", x.flatten(), self.inv_freq)
+
+ y_scales = self.scale ** y.flatten().div(self.scale_base)[:, None]
+ x_scales = self.scale ** x.flatten().div(self.scale_base)[:, None]
+
+ freqs = torch.cat([y_freqs, x_freqs], dim=-1)
+ emb = torch.repeat_interleave(freqs, repeats=2, dim=-1)
+
+ scales = torch.cat([y_scales, x_scales], dim=-1)
+ scales = torch.repeat_interleave(scales, repeats=2, dim=-1)
+
+ if dtype in [torch.float16, torch.bfloat16]:
+ emb = emb.float()
+
+ self.cos_cached = emb.cos()[None, :, :]
+ self.sin_cached = emb.sin()[None, :, :]
+ self.scale_cached = scales[None, :, :]
+
+ self.cos_cached = self.cos_cached.type(dtype)
+ self.sin_cached = self.sin_cached.type(dtype)
+ self.scale_cached = self.scale_cached.type(dtype)
+
+ return self.cos_cached, self.sin_cached, self.scale_cached
+
+ def forward(self, q: torch.Tensor, k: torch.Tensor, token_shape: Tuple[int, int]):
+ batch, seq_len, head_dim = q.shape
+ cos, sin, scale = self.cos_sin(token_shape, q.device, q.dtype)
+ # scale = self.scale**torch.arange(seq_len).to(self.scale).div(self.scale_base)[:, None]
+ # scale = torch.repeat_interleave(scale, 2, dim=-1).to(q.device)
+ # scale = torch.cat([scale, scale], dim=-1)
+ # scale = 1
+ return (
+ (q * cos * scale) + (rotate_every_two(q) * sin * scale),
+ (k * cos * (1 / scale)) + (rotate_every_two(k) * sin * (1 / scale)),
+ )
+
+
+class MagnetoAttention(nn.Module):
+ def __init__(self, d_model: int, n_head: int, pos_emb: XPosEmbedding2D):
+ super().__init__()
+ self.num_heads = n_head
+ self.head_dim = d_model // n_head
+ self.scale = self.head_dim ** -0.5
+
+ self.qkv = nn.Linear(d_model, d_model * 3, bias=False)
+ self.proj = nn.Linear(d_model, d_model)
+ self.pos_emb = pos_emb
+
+ self.norm0 = nn.LayerNorm(d_model)
+ self.norm1 = nn.LayerNorm(d_model)
+
+ def forward(self, x: torch.Tensor, num_prefix_tokens: int, patch_shape: Tuple[int, int]) -> torch.Tensor:
+ B, N, C = x.shape
+ x = self.norm0(x)
+
+ qkv = self.qkv(x).reshape(B, N, 3, C).permute(2, 0, 1, 3)
+ q, k, v = qkv.unbind(0)
+
+ q_pref = q[:, :num_prefix_tokens]
+ q_patch = q[:, num_prefix_tokens:]
+
+ k_pref = k[:, :num_prefix_tokens]
+ k_patch = k[:, num_prefix_tokens:]
+
+ q_patch, k_patch = self.pos_emb(q_patch, k_patch, patch_shape)
+
+ q = torch.cat([q_pref, q_patch], dim=1)
+ k = torch.cat([k_pref, k_patch], dim=1)
+
+ def head_reshape(t: torch.Tensor):
+ return t.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
+
+ q = head_reshape(q)
+ k = head_reshape(k)
+ v = head_reshape(v)
+
+ x = F.scaled_dot_product_attention(q, k, v)
+ x = x.transpose(1, 2).reshape(B, N, C)
+ x = self.norm1(x)
+ x = self.proj(x)
+ return x
+
+ def _reset_parameters(self):
+ xavier_uniform_(self.qkv.weight)
+ if self.qkv.bias is not None:
+ zeros_(self.qkv.bias)
+ xavier_normal_(self.proj.weight)
+ zeros_(self.proj.bias)
+
+
+class MagnetoTransformerEncoderLayer(nn.Module):
+ def __init__(self, d_model: int, nhead: int, pos_emb: XPosEmbedding2D,
+ num_encoder_layers: int, num_decoder_layers: int = 0,
+ dim_mhsa: int = 0,
+ dim_feedforward: int = 2048,
+ layer_norm_eps: float = 1e-5,
+ batch_first: bool = True):
+ super().__init__()
+
+ if dim_mhsa == 0:
+ dim_mhsa = d_model
+
+ self._num_encoder_layers = num_encoder_layers
+ self._num_decoder_layers = num_decoder_layers
+
+ self.attn = MagnetoAttention(d_model, nhead, pos_emb)
+
+ self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
+ self.linear2 = nn.Linear(d_model, dim_feedforward)
+ self.norm3 = nn.LayerNorm(dim_feedforward, eps=layer_norm_eps)
+ self.linear3 = nn.Linear(dim_feedforward, d_model)
+
+ def initialize(self):
+ gamma = _get_init_scale(self._num_encoder_layers, self._num_decoder_layers, is_encoder=True)
+
+ # Magneto Initialization
+ for mod in self.children():
+ if isinstance(mod, nn.Linear):
+ xavier_normal_(mod.weight.data, gamma)
+ elif isinstance(mod, MagnetoAttention):
+ mod._reset_parameters()
+
+ def forward(self, x: torch.Tensor, num_prefix_tokens: int, patch_shape: Tuple[int, int]) -> torch.Tensor:
+ x = x + self._sa_block(x, num_prefix_tokens, patch_shape)
+ x = x + self._ff_block(x)
+ return x
+
+ def _sa_block(self, x: torch.Tensor, num_prefix_tokens: int, patch_shape: Tuple[int, int]) -> torch.Tensor:
+ x = self.attn(x, num_prefix_tokens, patch_shape)
+ return x
+
+ def _ff_block(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.norm2(x)
+ x = self.linear2(x)
+ x = F.gelu(x)
+ x = self.norm3(x)
+ x = self.linear3(x)
+ return x
+
+
+class VisionTransformer(nn.Module):
+ """ Vision Transformer
+
+ A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
+ - https://arxiv.org/abs/2010.11929
+ """
+ dynamic_img_size: Final[bool]
+
+ def __init__(
+ self,
+ patch_size: Union[int, Tuple[int, int]] = 16,
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ depth: int = 12,
+ num_heads: int = 12,
+ mlp_ratio: float = 4.,
+ num_cls_tokens: int = 1,
+ num_reg_tokens: int = 0,
+ ) -> None:
+ """
+ Args:
+ patch_size: Patch size.
+ in_chans: Number of image input channels.
+ embed_dim: Transformer embedding dimension.
+ depth: Depth of transformer.
+ num_heads: Number of attention heads.
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
+ num_cls_tokens: Number of cls tokens
+ num_reg_tokens: Number of register tokens.
+ block_fn: Transformer block layer.
+ """
+ super().__init__()
+
+ self.patch_size = patch_size
+ self.embed_dim = embed_dim
+ self.num_cls_tokens = num_cls_tokens
+ self.num_reg_tokens = num_reg_tokens
+
+ self.patch_embed = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ self.prefix_buffer = nn.Parameter(torch.randn(1, self.num_prefix_tokens, embed_dim) * .02)
+
+ pos_emb = XPosEmbedding2D(embed_dim)
+
+ self.blocks = nn.ModuleList([
+ MagnetoTransformerEncoderLayer(
+ d_model=embed_dim,
+ nhead=num_heads,
+ num_encoder_layers=depth,
+ num_decoder_layers=0,
+ dim_feedforward=int(embed_dim * mlp_ratio),
+ pos_emb=pos_emb,
+ )
+ for _ in range(depth)
+ ])
+
+ for block in self.blocks:
+ block.initialize()
+
+ @property
+ def num_prefix_tokens(self):
+ return self.num_cls_tokens + self.num_reg_tokens
+
+ @property
+ def num_summary_tokens(self):
+ return self.num_prefix_tokens
+
+ def forward_features(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ x, patch_shape = self._patchify(x)
+
+ for block in self.blocks:
+ x = block(x, self.num_prefix_tokens, patch_shape)
+
+ summary = x[:, :self.num_cls_tokens]
+ features = x[:, self.num_prefix_tokens:]
+
+ return summary, features
+
+ def forward_intermediates(self, x: torch.Tensor, norm: bool = False, **kwargs):
+ patch_shape = tuple(d // self.patch_size for d in x.shape[-2:])
+
+ def patch_extractor(x: torch.Tensor):
+ x, _ = self._patchify(x)
+ return x
+
+ return forward_intermediates(
+ self,
+ patch_extractor=patch_extractor,
+ num_summary_tokens=self.num_prefix_tokens,
+ num_cls_tokens=self.num_cls_tokens,
+ norm=lambda y: y,
+ x=x,
+ block_kwargs=dict(num_prefix_tokens=self.num_prefix_tokens, patch_shape=patch_shape),
+ **kwargs,
+ )
+
+ def _patchify(self, x: torch.Tensor):
+ x = self.patch_embed(x)
+ patch_shape = x.shape[-2:]
+ x = rearrange(x, 'b c h w -> b (h w) c')
+
+ prefix = self.prefix_buffer.expand(x.shape[0], -1, -1)
+
+ x = torch.cat([prefix, x], dim=1)
+ return x, patch_shape
+
+
+@register_model
+def vit_base_patch16_xpos(num_cls_tokens: int = 1, num_reg_tokens: int = 0, **kwargs) -> VisionTransformer:
+ return VisionTransformer(patch_size=16, embed_dim=768, depth=12, num_heads=12,
+ num_cls_tokens=num_cls_tokens, num_reg_tokens=num_reg_tokens)
+
+
+@register_model
+def vit_large_patch16_xpos(num_cls_tokens: int = 1, num_reg_tokens: int = 0, **kwargs) -> VisionTransformer:
+ return VisionTransformer(patch_size=16, embed_dim=1024, depth=24, num_heads=16,
+ num_cls_tokens=num_cls_tokens, num_reg_tokens=num_reg_tokens)
+
+
+@register_model
+def vit_huge_patch16_xpos(num_cls_tokens: int = 1, num_reg_tokens: int = 0, **kwargs) -> VisionTransformer:
+ return VisionTransformer(patch_size=16, embed_dim=1280, depth=32, num_heads=16,
+ num_cls_tokens=num_cls_tokens, num_reg_tokens=num_reg_tokens)
+
+
+@register_model
+def vit_giant_patch16_xpos(num_cls_tokens: int = 1, num_reg_tokens: int = 0, **kwargs) -> VisionTransformer:
+ return VisionTransformer(patch_size=16, embed_dim=1408, depth=40, num_heads=16,
+ num_cls_tokens=num_cls_tokens, num_reg_tokens=num_reg_tokens)
+
+
+@register_model
+def vit_bigG_patch16_xpos(num_cls_tokens: int = 1, num_reg_tokens: int = 0, **kwargs) -> VisionTransformer:
+ return VisionTransformer(patch_size=16, embed_dim=1664, depth=48, num_heads=16,
+ num_cls_tokens=num_cls_tokens, num_reg_tokens=num_reg_tokens)
\ No newline at end of file
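
To make the xPos rotation bookkeeping above concrete, here is a standalone check of `rotate_every_two` (reimplemented locally so the snippet runs on its own); it reproduces the pairwise `(-x2, x1)` interleaving described in the inline comment.

```python
import torch

def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x[..., ::2], x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

x = torch.arange(8.).view(1, 1, 8)
print(rotate_every_two(x))
# tensor([[[-1.,  0., -3.,  2., -5.,  4., -7.,  6.]]])
```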
diff --git a/tim/models/nvidia_radio/radio/vit_patch_generator.py b/tim/models/nvidia_radio/radio/vit_patch_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a2da061e2eb5ef74b994121f33d7f6e9c72826f
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/vit_patch_generator.py
@@ -0,0 +1,287 @@
+# Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+import math
+from typing import Union, Tuple, Optional
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+from einops import rearrange
+
+from .cls_token import ClsToken
+
+input_dim_t = Union[int, Tuple[int, int]]
+
+try:
+ # raise ImportError()
+ from indirect_grid_sample import indirect_grid_sample
+except ImportError:
+ indirect_grid_sample = None
+
+class ViTPatchGenerator(nn.Module):
+ def __init__(self,
+ patch_size: int,
+ embed_dim: int,
+ input_dims: input_dim_t,
+ abs_pos: bool = True,
+ normalize_patches: bool = False,
+ cls_token: bool = False,
+ max_input_dims: Optional[input_dim_t] = None,
+ pos_dropout: float = 0.0,
+ return_pos_enc: bool = False,
+ num_cls_tokens: int = 1,
+ register_multiple: Optional[int] = None,
+ num_registers: Optional[int] = None,
+ patch_bias: bool = False,
+ device=None, dtype=None,
+ ):
+ super().__init__()
+
+ if isinstance(input_dims, int):
+ input_dims = (input_dims, input_dims)
+
+ if max_input_dims is None:
+ max_input_dims = input_dims
+ if isinstance(max_input_dims, int):
+ max_input_dims = (max_input_dims, max_input_dims)
+
+ max_input_dims = tuple(
+ int(math.ceil(d / patch_size) * patch_size)
+ for d in max_input_dims
+ )
+
+ self.cpe_mode = max_input_dims != input_dims
+ self.pos_dropout = pos_dropout
+ self.return_pos_enc = return_pos_enc
+
+ factory = dict(device=device, dtype=dtype)
+
+ self.patch_size = patch_size
+ self.abs_pos = abs_pos
+ self.embed_dim = embed_dim
+
+ self.num_rows = max_input_dims[0] // patch_size
+ self.num_cols = max_input_dims[1] // patch_size
+ self.input_dims = tuple(d // patch_size for d in input_dims)
+ self.num_patches = self.num_rows * self.num_cols
+ self.max_input_dims = max_input_dims
+
+ self.im_to_patches = Im2Patches(patch_size)
+ self.embedder = ViTPatchLinear(patch_size, embed_dim, bias=patch_bias, **factory)
+
+ if abs_pos:
+ scale = embed_dim ** -0.5
+ self.pos_embed = nn.Parameter(torch.randn(1, self.num_patches, embed_dim, **factory) * scale)
+
+ self.cls_token = ClsToken(
+ embed_dim,
+ num_tokens=num_cls_tokens,
+ enabled=cls_token,
+ register_multiple=register_multiple,
+ num_registers=num_registers,
+ )
+
+ self.patch_normalizer = nn.LayerNorm(embed_dim) if normalize_patches else nn.Identity()
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ patches = self.embed_patches(x)
+ patches, pos_enc = self.apply_pos_enc(patches, input_size=x.shape[2:])
+ patches = self.cls_token(patches)
+ patches = self.patch_normalizer(patches)
+ if self.return_pos_enc:
+ return patches, pos_enc
+ return patches
+
+ @property
+ def apply_cls_token(self):
+ return self.cls_token.enabled
+
+ @property
+ def num_cls_tokens(self):
+ return self.cls_token.num_tokens
+
+ @property
+ def num_cls_patches(self):
+ return self.cls_token.num_patches
+
+ @property
+ def num_registers(self):
+ return self.cls_token.num_registers
+
+ @property
+ def num_skip(self):
+ return self.num_cls_tokens + self.num_registers
+
+ def no_weight_decay(self):
+ return [
+ 'pos_embed',
+ ]
+
+ def _load_embed(self, src_embed: torch.Tensor, targ_embed: nn.Parameter):
+ if src_embed.shape != targ_embed.shape:
+ src_size = int(math.sqrt(src_embed.shape[1]))
+
+ assert src_size ** 2 == src_embed.shape[1], 'Unable to interpolate non-square embedding'
+
+ src_embed = rearrange(src_embed, 'b (h w) c -> b c h w', h=src_size, w=src_size)
+ src_embed = F.interpolate(src_embed, size=(self.num_rows, self.num_cols), mode='bicubic', align_corners=True, antialias=False)
+ src_embed = rearrange(src_embed, 'b c h w -> b (h w) c')
+ targ_embed.data.copy_(src_embed)
+
+ def _load_projection(self, src_proj_weight: torch.Tensor, targ_proj_weight: torch.Tensor):
+ if src_proj_weight.shape != targ_proj_weight.shape:
+ src_patch_size = int(math.sqrt(src_proj_weight.shape[1] // 3))
+
+ assert (src_patch_size ** 2) * 3 == src_proj_weight.shape[1], 'Unable to interpolate non-square patch size'
+
+ src_proj_weight = rearrange(src_proj_weight, 'b (c h w) -> b c h w', c=3, h=src_patch_size, w=src_patch_size)
+ src_proj_weight = F.interpolate(src_proj_weight, size=(self.patch_size, self.patch_size), mode='bicubic', align_corners=True, antialias=False)
+ src_proj_weight = rearrange(src_proj_weight, 'b c h w -> b (c h w)')
+ targ_proj_weight.data.copy_(src_proj_weight)
+
+ def embed_patches(self, x: torch.Tensor) -> torch.Tensor:
+ patches = self.im_to_patches(x)
+ patches = self.embedder(patches)
+ return patches
+
+ def apply_pos_enc(self,
+ patches: torch.Tensor,
+ patch_idxs: Optional[torch.Tensor] = None,
+ input_size: Optional[Tuple[int, int]] = None,
+ ) -> torch.Tensor:
+ if not self.abs_pos:
+ return patches, None # keep the (patches, pos_enc) tuple shape expected by forward()
+
+ pos_enc = self.get_pos_enc(patches.shape[0], patch_idxs, input_size)
+
+ if self.training and self.pos_dropout > 0:
+ keeps = torch.rand(patches.shape[0], 1, 1, dtype=pos_enc.dtype, device=pos_enc.device) > self.pos_dropout
+ pos_enc_drop = torch.where(keeps, pos_enc, 0)
+ else:
+ pos_enc_drop = pos_enc
+
+ return patches + pos_enc_drop, pos_enc
+
+ def get_pos_enc(self,
+ batch_size: int,
+ patch_idxs: Optional[torch.Tensor] = None,
+ input_size: Optional[Tuple[int, int]] = None,
+ ) -> torch.Tensor:
+ if input_size is None:
+ input_dims = self.input_dims
+ else:
+ input_dims = tuple(d // self.patch_size for d in input_size)
+
+ pos_embed = self._get_pos_embeddings(batch_size, input_dims)
+
+ if patch_idxs is None:
+ return pos_embed
+
+ exp_patch_idxs = patch_idxs.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1])
+
+ pos_embed = torch.gather(pos_embed.expand(patch_idxs.shape[0], -1, -1), dim=1, index=exp_patch_idxs)
+ return pos_embed
+
+
+ def _get_pos_embeddings(self, batch_size: int, input_dims: Tuple[int, int]):
+ if (self.num_rows, self.num_cols) == input_dims:
+ return self.pos_embed
+
+ pos_embed = self.pos_embed.reshape(1, self.num_rows, self.num_cols, -1).permute(0, 3, 1, 2)
+
+ def window_select(pos_embed):
+ if input_dims[0] < pos_embed.shape[-2]:
+ pos_embed = pos_embed[..., :input_dims[0], :]
+ if input_dims[1] < pos_embed.shape[-1]:
+ pos_embed = pos_embed[..., :, :input_dims[1]]
+ return pos_embed
+
+ if self.cpe_mode:
+ if self.training:
+ min_scale = math.sqrt(0.1)
+ scale = torch.rand(batch_size, 1, 1, device=pos_embed.device) * (1 - min_scale) + min_scale
+ aspect_min = math.log(3 / 4)
+ aspect_max = -aspect_min
+ aspect = torch.exp(torch.rand(batch_size, 1, 1, device=pos_embed.device) * (aspect_max - aspect_min) + aspect_min)
+
+ scale_x = scale * aspect
+ scale_y = scale * (1 / aspect)
+ scale_xy = torch.stack([scale_x, scale_y], dim=-1).clamp_(0, 1)
+
+ pos_xy = torch.rand(batch_size, 1, 1, 2, device=pos_embed.device) * (1 - scale_xy)
+
+ lin_x = torch.linspace(0, 1, steps=input_dims[1], device=pos_embed.device)[None, None].expand(batch_size, input_dims[0], -1)
+ lin_y = torch.linspace(0, 1, steps=input_dims[0], device=pos_embed.device)[None, :, None].expand(batch_size, -1, input_dims[1])
+
+ lin_xy = torch.stack([lin_x, lin_y], dim=-1)
+
+ grid_xy = lin_xy * scale_xy + pos_xy
+
+ # Convert to [-1, 1] range
+ grid_xy.mul_(2).sub_(1)
+
+ pos_embed = F.grid_sample(
+ pos_embed.float().expand(batch_size, -1, -1, -1),
+ grid=grid_xy,
+ mode='bilinear',
+ padding_mode='zeros',
+ align_corners=True,
+ ).to(pos_embed.dtype)
+ else:
+ # i_rows, i_cols = input_dims
+ # p_rows, p_cols = pos_embed.shape[2:]
+ # if i_rows <= p_rows and i_cols <= p_cols:
+ # left = (p_cols - i_cols) // 2
+ # top = (p_rows - i_rows) // 2
+ # pos_embed = pos_embed[..., top:top+i_rows, left:left+i_cols]
+ # else:
+ max_dim = max(input_dims)
+ pos_embed = F.interpolate(pos_embed.float(), size=(max_dim, max_dim), align_corners=True, mode='bilinear').to(pos_embed.dtype)
+
+ pos_embed = window_select(pos_embed)
+ else:
+ pos_embed = window_select(pos_embed)
+
+ if pos_embed.shape[-2:] != input_dims:
+ pos_embed = F.interpolate(pos_embed.float(), size=input_dims, align_corners=True, mode='bilinear').to(pos_embed.dtype)
+
+ pos_embed = pos_embed.flatten(2).permute(0, 2, 1)
+
+ return pos_embed
+
+
+class Im2Patches(nn.Module):
+ def __init__(self, patch_size: int):
+ super().__init__()
+ self.patch_size = patch_size
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ if self.patch_size == 1:
+ patches = x.flatten(2)
+ patches = patches.permute(0, 2, 1)
+ return patches
+
+ py = x.shape[-2] // self.patch_size
+ px = x.shape[-1] // self.patch_size
+ patches = rearrange(x, 'b c (py yy) (px xx) -> b (py px) (c yy xx)',
+ py=py, yy=self.patch_size,
+ px=px, xx=self.patch_size,
+ )
+ return patches
+
+
+class ViTPatchLinear(nn.Linear):
+ def __init__(self, patch_size: int, embed_dim: int, bias: bool = False, **factory):
+ super().__init__(
+ 3 * (patch_size ** 2),
+ embed_dim,
+ bias=bias,
+ **factory
+ )
+ self.patch_size = patch_size
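
A quick shape sketch for the patch pipeline above: `Im2Patches` flattens each `patch_size x patch_size` tile into a row of length `C * p * p`, and `ViTPatchLinear` projects those rows to the embedding dimension. The sizes below are illustrative.

```python
import torch
from einops import rearrange

p = 16
x = torch.rand(2, 3, 224, 224)
patches = rearrange(x, 'b c (py yy) (px xx) -> b (py px) (c yy xx)', yy=p, xx=p)
print(patches.shape)                                 # torch.Size([2, 196, 768])

proj = torch.nn.Linear(3 * p * p, 1024, bias=False)  # same shape as ViTPatchLinear(16, 1024)
print(proj(patches).shape)                           # torch.Size([2, 196, 1024])
```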
diff --git a/tim/models/nvidia_radio/radio/vitdet.py b/tim/models/nvidia_radio/radio/vitdet.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfd1b413fbc38ae6c49d7feab806803de566f7a9
--- /dev/null
+++ b/tim/models/nvidia_radio/radio/vitdet.py
@@ -0,0 +1,188 @@
+from collections import defaultdict
+from contextlib import contextmanager
+from logging import getLogger
+import math
+import sys
+from typing import List, Union, Iterable
+
+import numpy as np
+import torch
+from torch import nn
+
+from timm.models import VisionTransformer
+from einops import rearrange
+
+from .extra_models import DinoWrapper
+
+DEFAULT_NUM_WINDOWED = 5
+DEFAULT_NUM_GLOBAL = 4
+
+
+class VitDetArgs:
+ def __init__(self,
+ window_size: int,
+ num_summary_tokens: int,
+ num_windowed: int = None,
+ num_global: int = None,
+ ):
+ self.window_size = window_size
+ self.num_summary_tokens = num_summary_tokens
+ self.num_windowed = num_windowed
+ self.num_global = num_global
+
+
+def apply_vitdet_arch(model: Union[VisionTransformer, DinoWrapper], args: VitDetArgs):
+ if isinstance(model, VisionTransformer):
+ patch_embed = getattr(model, 'patch_generator', model.patch_embed)
+
+ return ViTDetHook(patch_embed, model.blocks, args)
+ elif isinstance(model, DinoWrapper):
+ inner = model.inner
+
+ patch_embed = getattr(inner, 'patch_generator', inner.patch_embed)
+ return ViTDetHook(patch_embed, inner.blocks, args)
+ else:
+ print(f'Warning: Unable to apply VitDet aug!', file=sys.stderr)
+
+
+class ViTDetHook:
+ def __init__(self,
+ embedder: nn.Module,
+ blocks: nn.Sequential,
+ args: VitDetArgs,
+ ):
+ self.blocks = blocks
+ self.num_summary_tokens = args.num_summary_tokens
+ self.window_size = args.window_size
+
+ self._input_resolution = None
+ self._num_windows = None
+ self._cls_patch = None
+ self._order_cache = dict()
+
+ embedder.register_forward_pre_hook(self._enter_model)
+
+ # This will decide if we window-fy the patches
+ # and enable vit-det for this iteration, and if so,
+ # rearrange the patches for efficient mode switching
+ blocks.register_forward_pre_hook(self._enter_blocks)
+
+ is_global = True
+ if args.num_windowed is not None:
+ period = args.num_windowed + 1
+ else:
+ num_global = args.num_global or DEFAULT_NUM_GLOBAL
+ period = max(len(blocks) // num_global, 1)
+
+ for i, layer in enumerate(blocks[:-1]):
+ ctr = i % period
+ if ctr == 0:
+ layer.register_forward_pre_hook(self._to_windows)
+ is_global = False
+ elif ctr == period - 1:
+ layer.register_forward_pre_hook(self._to_global)
+ is_global = True
+
+ # Always ensure the final layer is a global layer
+ if not is_global:
+ blocks[-1].register_forward_pre_hook(self._to_global)
+
+ blocks.register_forward_hook(self._exit_model)
+
+ def _enter_model(self, _, input: List[torch.Tensor]):
+ self._input_resolution = input[0].shape[-2:]
+
+ def _enter_blocks(self, _, input: List[torch.Tensor]):
+ # print(f'{get_rank()} - ViTDet Window Size: {self._window_size}', file=sys.stderr)
+
+ patches = input[0]
+ patches = self._rearrange_patches(patches)
+
+ return (patches,) + input[1:]
+
+ def _to_windows(self, _, input: List[torch.Tensor]):
+ patches = input[0]
+
+ if self.num_summary_tokens:
+ self._cls_patch = patches[:, :self.num_summary_tokens]
+ patches = patches[:, self.num_summary_tokens:]
+
+ patches = rearrange(
+ patches, 'b (p t) c -> (b p) t c',
+ p=self._num_windows, t=self.window_size ** 2,
+ )
+
+ return (patches,) + input[1:]
+
+ def _to_global(self, _, input: List[torch.Tensor]):
+ patches = input[0]
+
+ patches = rearrange(
+ patches, '(b p) t c -> b (p t) c',
+ p=self._num_windows, t=self.window_size ** 2,
+ b=patches.shape[0] // self._num_windows,
+ )
+
+ if self.num_summary_tokens:
+ patches = torch.cat([
+ self._cls_patch,
+ patches,
+ ], dim=1)
+
+ return (patches,) + input[1:]
+
+ def _exit_model(self, _, inputs: List[torch.Tensor], patches: torch.Tensor):
+ # Return patches to their original order
+ patch_order = self._order_cache[self._input_resolution][0]
+ patch_order = patch_order.reshape(1, -1, 1).expand_as(patches)
+
+ ret_patches = torch.empty_like(patches)
+ ret_patches = torch.scatter(
+ ret_patches,
+ dim=1,
+ index=patch_order,
+ src=patches,
+ )
+
+ return ret_patches
+
+ def _rearrange_patches(self, patches: torch.Tensor):
+ # We rearrange the patches so that we can efficiently
+ # switch between windowed and global mode by just
+ # reshaping the tensor
+
+ patch_order, self._num_windows = self._order_cache.get(self._input_resolution, (None, None))
+ if patch_order is None:
+ num_feat_patches = patches.shape[1] - self.num_summary_tokens
+ num_pixels = self._input_resolution[0] * self._input_resolution[1]
+
+ patch_size = int(round(math.sqrt(num_pixels / num_feat_patches)))
+ rows = self._input_resolution[-2] // patch_size
+ cols = self._input_resolution[-1] // patch_size
+
+ w_rows = rows // self.window_size
+ w_cols = cols // self.window_size
+
+ patch_order = torch.arange(0, num_feat_patches, device=patches.device)
+
+ patch_order = rearrange(
+ patch_order, '(wy py wx px) -> (wy wx py px)',
+ wy=w_rows, wx=w_cols,
+ py=self.window_size, px=self.window_size,
+ )
+
+ if self.num_summary_tokens:
+ patch_order = torch.cat([
+ torch.arange(self.num_summary_tokens, dtype=patch_order.dtype, device=patch_order.device),
+ patch_order + self.num_summary_tokens,
+ ])
+
+ self._num_windows = w_rows * w_cols
+ self._order_cache[self._input_resolution] = (
+ patch_order,
+ self._num_windows,
+ )
+
+ patch_order = patch_order.reshape(1, -1, 1).expand_as(patches)
+ patches = torch.gather(patches, dim=1, index=patch_order)
+ return patches
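
The windowing hook above hinges on a single index permutation that groups patches by window, so switching between windowed and global attention becomes a pure reshape. A toy version of the `_rearrange_patches` index computation (4x4 grid, window size 2) makes that easier to verify.

```python
import torch
from einops import rearrange

rows = cols = 4
window = 2
order = torch.arange(rows * cols)
order = rearrange(order, '(wy py wx px) -> (wy wx py px)',
                  wy=rows // window, wx=cols // window, py=window, px=window)
print(order.tolist())
# [0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15] -> each group of 4 is one 2x2 window
```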
diff --git a/tim/models/t2i/tim_model.py b/tim/models/t2i/tim_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a6159792accdf3d6ea9a5a937c0d99d57f97915
--- /dev/null
+++ b/tim/models/t2i/tim_model.py
@@ -0,0 +1,493 @@
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# GLIDE: https://github.com/openai/glide-text2im
+# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
+# --------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+import math
+from timm.layers.mlp import SwiGLU, Mlp
+from timm.models.vision_transformer import PatchEmbed
+from tim.models.utils.funcs import build_mlp, modulate, get_parameter_dtype
+from tim.models.utils.rope import VisionRotaryEmbedding, rotate_half
+from flash_attn import flash_attn_func
+
+
+#################################################################################
+# Embedding Layers for Timesteps and Class Labels #
+#################################################################################
+class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+ """
+ def __init__(self, hidden_size, frequency_embedding_size=256):
+ super().__init__()
+ self.mlp = nn.Sequential(
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+ nn.SiLU(),
+ nn.Linear(hidden_size, hidden_size, bias=True),
+ )
+ self.frequency_embedding_size = frequency_embedding_size
+
+ @staticmethod
+ def positional_embedding(t, dim, max_period=10000):
+ """
+ Create sinusoidal timestep embeddings.
+ :param t: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings.
+ :return: an (N, D) Tensor of positional embeddings.
+ """
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+ half = dim // 2
+ freqs = torch.exp(
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+ ).to(device=t.device)
+ args = t[:, None].float() * freqs[None]
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+ return embedding
+
+ def forward(self, t):
+ t_freq = self.positional_embedding(t, dim=self.frequency_embedding_size).to(t.dtype)
+ t_emb = self.mlp(t_freq)
+ return t_emb
+
+
+class CaptionEmbedder(nn.Module):
+ """
+ Embeds caption features into vector representations for text conditioning.
+ """
+ def __init__(self, cap_feat_dim, hidden_size):
+ super().__init__()
+ self.norm = nn.LayerNorm(cap_feat_dim)
+ self.mlp = SwiGLU(in_features=cap_feat_dim, hidden_features=hidden_size*4, out_features=hidden_size)
+
+
+ def forward(self, cap_feats):
+ '''
+ Classifier-free guidance (cfg) is also essential in text-to-image generation.
+ '''
+ cap_feats = self.mlp(self.norm(cap_feats))
+ return cap_feats
+
+
+
+#################################################################################
+# Attention Block #
+#################################################################################
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = False,
+ qk_norm: bool = False,
+ attn_drop: float = 0.,
+ proj_drop: float = 0.,
+ norm_layer: nn.Module = nn.LayerNorm,
+ distance_aware: bool = False,
+ ) -> None:
+ super().__init__()
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.distance_aware = distance_aware
+ if distance_aware:
+ self.qkv_d = nn.Linear(dim, dim * 3, bias=False)
+ self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x: torch.Tensor, freqs_cos, freqs_sin, attn_type='fused_attn', delta_t=None) -> torch.Tensor:
+ B, N, C = x.shape
+ if self.distance_aware:
+ qkv = self.qkv(x) + self.qkv_d(delta_t)
+ else:
+ qkv = self.qkv(x)
+ if attn_type == 'flash_attn': # q, k, v: (B, N, n_head, d_head)
+ qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 1, 3, 4)
+ else: # q, k, v: (B, n_head, N, d_head)
+ qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+ ori_dtype = qkv.dtype
+ q, k, v = qkv.unbind(0)
+ q, k = self.q_norm(q), self.k_norm(k)
+
+ q = q * freqs_cos + rotate_half(q) * freqs_sin
+ k = k * freqs_cos + rotate_half(k) * freqs_sin
+ q, k = q.to(ori_dtype), k.to(ori_dtype)
+
+ if attn_type == 'flash_attn':
+ x = flash_attn_func(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.reshape(B, N, C)
+ elif attn_type == 'fused_attn':
+ x = F.scaled_dot_product_attention(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.transpose(1, 2).reshape(B, N, C)
+ else:
+ q = q * self.scale
+ attn = q @ k.transpose(-2, -1)
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+ x = attn @ v
+ x = x.transpose(1, 2).reshape(B, N, C)
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+
+
+
+
+#################################################################################
+# Cross Attention Block #
+#################################################################################
+
+class CrossAttention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = False,
+ qk_norm: bool = False,
+ attn_drop: float = 0.,
+ proj_drop: float = 0.,
+ norm_layer: nn.Module = nn.LayerNorm,
+ ) -> None:
+ super().__init__()
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+
+ self.q = nn.Linear(dim, dim, bias=qkv_bias)
+ self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
+ self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x: torch.Tensor, y: torch.Tensor, freqs_cos, freqs_sin, attn_type='fused_attn') -> torch.Tensor:
+ B, N, C = x.shape
+ _, M, _ = y.shape
+ if attn_type == 'flash_attn': # q, k, v: (B, N, n_head, d_head)
+ q = self.q(x).reshape(B, N, self.num_heads, self.head_dim)
+ kv = self.kv(y).reshape(B, M, 2, self.num_heads, self.head_dim).permute(2, 0, 1, 3, 4)
+ else: # q, k, v: (B, n_head, N, d_head)
+ q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
+ kv = self.kv(y).reshape(B, M, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+ ori_dtype = q.dtype
+ k, v = kv.unbind(0)
+ q, k = self.q_norm(q), self.k_norm(k)
+ q = q * freqs_cos + rotate_half(q) * freqs_sin
+ q, k = q.to(ori_dtype), k.to(ori_dtype)
+
+ if attn_type == 'flash_attn':
+ x = flash_attn_func(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.reshape(B, N, C)
+ elif attn_type == 'fused_attn':
+ x = F.scaled_dot_product_attention(
+ q, k, v,
+ dropout_p=self.attn_drop.p if self.training else 0.,
+ )
+ x = x.transpose(1, 2).reshape(B, N, C)
+ else:
+ q = q * self.scale
+ attn = q @ k.transpose(-2, -1)
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+ x = attn @ v
+ x = x.transpose(1, 2).reshape(B, N, C)
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+
+
+
+
+#################################################################################
+# Core TiM Model #
+#################################################################################
+
+class TiMBlock(nn.Module):
+ """
+ A TiM block with adaptive layer norm zero (adaLN-Zero) conditioning.
+ """
+ def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
+ super().__init__()
+ self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ distance_aware = block_kwargs.get('distance_aware', False)
+ self.attn = Attention(
+ hidden_size, num_heads=num_heads, qkv_bias=True, qk_norm=block_kwargs["qk_norm"],
+ distance_aware=distance_aware
+ )
+ self.norm2_i = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.norm2_t = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.cross_attn = CrossAttention(
+ hidden_size, num_heads=num_heads, qkv_bias=True, qk_norm=block_kwargs["qk_norm"]
+ )
+ self.norm3 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ mlp_hidden_dim = int(hidden_size * mlp_ratio)
+ self.mlp = SwiGLU(
+ in_features=hidden_size, hidden_features=(mlp_hidden_dim*2)//3, bias=True
+ )
+ if block_kwargs.get('lora_hidden_size', None) is not None:
+ lora_hidden_size = block_kwargs['lora_hidden_size']
+ else:
+ lora_hidden_size = (hidden_size//4)*3
+ self.adaLN_modulation = SwiGLU(
+ in_features=hidden_size, hidden_features=lora_hidden_size, out_features=9*hidden_size, bias=True
+ )
+
+
+
+ def forward(self, x, y, c, freqs_cos, freqs_sin, attn_type, delta_t=None):
+ (
+ shift_msa, scale_msa, gate_msa,
+ shift_msc, scale_msc, gate_msc,
+ shift_mlp, scale_mlp, gate_mlp
+ ) = self.adaLN_modulation(c).chunk(9, dim=-1)
+ x = x + gate_msa * self.attn(modulate(self.norm1(x), shift_msa, scale_msa), freqs_cos, freqs_sin, attn_type, delta_t)
+ x = x + gate_msc * self.cross_attn(modulate(self.norm2_i(x), shift_msc, scale_msc), self.norm2_t(y), freqs_cos, freqs_sin, attn_type)
+ x = x + gate_mlp * self.mlp(modulate(self.norm3(x), shift_mlp, scale_mlp))
+
+ return x
+
+
+class FinalLayer(nn.Module):
+ """
+ The final layer of TiM.
+ """
+ def __init__(self, hidden_size, patch_size, out_channels):
+ super().__init__()
+ self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
+ self.adaLN_modulation = SwiGLU(
+ in_features=hidden_size, hidden_features=hidden_size//2, out_features=2*hidden_size, bias=True
+ )
+
+
+ def forward(self, x, c):
+ shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
+ x = modulate(self.norm_final(x), shift, scale)
+ x = self.linear(x)
+
+ return x
+
+
+class TiM(nn.Module):
+ """
+ Diffusion model with a Transformer backbone.
+ """
+ def __init__(
+ self,
+ input_size=32,
+ patch_size=2,
+ in_channels=4,
+ hidden_size=1152,
+ encoder_depth=8,
+ depth=28,
+ num_heads=16,
+ mlp_ratio=4.0,
+ cap_feat_dim=2048,
+ z_dim=768,
+ projector_dim=2048,
+ use_checkpoint: bool = False,
+ new_condition: str = 't-r',
+ use_new_embed: bool = False,
+ **block_kwargs # qk_norm
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = in_channels
+ self.patch_size = patch_size
+ self.num_heads = num_heads
+ self.cap_feat_dim = cap_feat_dim
+ self.encoder_depth = encoder_depth
+ self.use_checkpoint = use_checkpoint
+ self.new_condition = new_condition
+ self.use_new_embed = use_new_embed
+
+ self.x_embedder = PatchEmbed(
+ input_size, patch_size, in_channels, hidden_size, bias=True, strict_img_size=False
+ )
+ self.t_embedder = TimestepEmbedder(hidden_size) # timestep embedding type
+ if use_new_embed:
+ self.delta_embedder = TimestepEmbedder(hidden_size)
+ self.y_embedder = CaptionEmbedder(cap_feat_dim, hidden_size)
+ # Rotary position embedding (RoPE) over the 2D patch grid:
+ self.rope = VisionRotaryEmbedding(head_dim=hidden_size//num_heads)
+
+ self.blocks = nn.ModuleList([
+ TiMBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, **block_kwargs) for _ in range(depth)
+ ])
+ self.projector = build_mlp(hidden_size, projector_dim, z_dim)
+ self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
+ self.initialize_weights()
+
+ def initialize_weights(self):
+ # Initialize transformer layers:
+ def _basic_init(module):
+ if isinstance(module, nn.Linear):
+ torch.nn.init.xavier_uniform_(module.weight)
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+ self.apply(_basic_init)
+
+ # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+ w = self.x_embedder.proj.weight.data
+ nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+ nn.init.constant_(self.x_embedder.proj.bias, 0)
+
+ # Initialize caption embedding MLP:
+ nn.init.normal_(self.y_embedder.mlp.fc1_g.weight, std=0.02)
+ nn.init.normal_(self.y_embedder.mlp.fc1_x.weight, std=0.02)
+ nn.init.normal_(self.y_embedder.mlp.fc2.weight, std=0.02)
+
+ # Initialize timestep embedding MLP:
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+ # Zero-out adaLN modulation layers in TiM blocks:
+ for block in self.blocks:
+ nn.init.constant_(block.adaLN_modulation.fc2.weight, 0)
+ nn.init.constant_(block.adaLN_modulation.fc2.bias, 0)
+
+
+ # Zero-out output layers:
+ nn.init.constant_(self.final_layer.adaLN_modulation.fc2.weight, 0)
+ nn.init.constant_(self.final_layer.adaLN_modulation.fc2.bias, 0)
+ nn.init.constant_(self.final_layer.linear.weight, 0)
+ nn.init.constant_(self.final_layer.linear.bias, 0)
+
+ def unpatchify(self, x, H, W):
+ """
+ x: (N, T, patch_size**2 * C)
+ imgs: (N, H, W, C)
+ """
+ c = self.out_channels
+ p = self.patch_size
+ h, w = int(H/p), int(W/p)
+
+
+ x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
+ x = torch.einsum('nhwpqc->nchpwq', x)
+ imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
+ return imgs
+
+ def get_rope(self, h, w, attn_type):
+ grid_h = torch.arange(h)
+ grid_w = torch.arange(w)
+ grid = torch.meshgrid(grid_h, grid_w, indexing='xy')
+ grid = torch.stack(grid, dim=0).reshape(2, -1).unsqueeze(0)
+ freqs_cos, freqs_sin = self.rope.get_cached_2d_rope_from_grid(grid)
+ if attn_type == 'flash_attn': # (1, N, 1, d_head)
+ return freqs_cos.unsqueeze(2), freqs_sin.unsqueeze(2)
+ else: # (1, 1, N, d_head)
+ return freqs_cos.unsqueeze(1), freqs_sin.unsqueeze(1)
+
+
+ def forward(self, x, t, r, y, attn_type='flash_attn', return_zs=False, jvp=False):
+ """
+ Forward pass of TiM.
+ x: (B, C, H, W) tensor of spatial inputs (images or latent representations of images)
+        t: (B,) tensor of current diffusion timesteps
+        r: (B,) tensor of target timesteps paired with t (see get_delta_embed)
+        y: (B, M, cap_feat_dim) tensor of caption features
+ """
+ B, C, H, W = x.shape
+        x = self.x_embedder(x)  # (B, T, D), where T = H * W / patch_size ** 2
+
+ # timestep and class embedding
+ t_embed = self.t_embedder(t).unsqueeze(1) # (B, 1, D)
+ delta_embed = self.get_delta_embed(t, r).unsqueeze(1) # (B, 1, D)
+ y = self.y_embedder(y) # (B, M, D)
+ c = t_embed + delta_embed # (B, 1, D)
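+        # c conditions every block on the timestep embedding plus the (t, r)-dependent embedding from get_delta_embed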
+
+
+ freqs_cos, freqs_sin = self.get_rope(
+ int(H/self.patch_size), int(W/self.patch_size), attn_type
+ )
+
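+        # activation checkpointing is bypassed when jvp=True (forward-mode AD path)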
+ for i, block in enumerate(self.blocks):
+ if not self.use_checkpoint or jvp:
+ x = block(x, y, c, freqs_cos, freqs_sin, attn_type, delta_embed) # (B, N, D)
+ else:
+ x = torch.utils.checkpoint.checkpoint(
+ self.ckpt_wrapper(block), x, y, c, freqs_cos, freqs_sin, attn_type, delta_embed
+ )
+ if (i + 1) == self.encoder_depth:
+ h_proj = self.projector(x)
+ x = self.final_layer(x, c) # (B, N, patch_size ** 2 * out_channels)
+ x = self.unpatchify(x, H, W) # (b, out_channels, H, W)
+
+ if return_zs:
+ return x, h_proj
+ else:
+ return x
+
+ def get_delta_embed(self, t, r):
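+        # build the auxiliary conditioning embedding from (t, r), selected by self.new_condition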
+ if self.use_new_embed:
+ delta_embedder = self.delta_embedder
+ else:
+ delta_embedder = self.t_embedder
+ if self.new_condition == 't-r':
+ delta_embed = delta_embedder(t-r)
+ elif self.new_condition == 'r':
+ delta_embed = delta_embedder(r)
+ elif self.new_condition == 't,r':
+ delta_embed = self.t_embedder(t) + delta_embedder(r)
+ elif self.new_condition == 't,t-r':
+ delta_embed = self.t_embedder(t) + delta_embedder(t-r)
+ elif self.new_condition == 'r,t-r':
+ delta_embed = self.t_embedder(r) + delta_embedder(t-r)
+ elif self.new_condition == 't,r,t-r':
+ delta_embed = self.t_embedder(t) + self.t_embedder(r) + delta_embedder(t-r)
+ else:
+ raise NotImplementedError
+ return delta_embed
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+ @property
+ def dtype(self) -> torch.dtype:
+ """
+ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
+ """
+ return get_parameter_dtype(self)
+
+
+
+
\ No newline at end of file
diff --git a/tim/models/utils/funcs.py b/tim/models/utils/funcs.py
new file mode 100644
index 0000000000000000000000000000000000000000..2197b1bd4bd44687ae74d6bf2c4e814ca6b59e24
--- /dev/null
+++ b/tim/models/utils/funcs.py
@@ -0,0 +1,53 @@
+import torch
+import torch.nn as nn
+
+from torch import Tensor
+from typing import List, Tuple
+from itertools import chain
+
+
+
+def expand_t_like_x(t, x):
+ """Function to reshape time t to broadcastable dimension of x
+ Args:
+ t: [batch_dim,], time vector
+ x: [batch_dim,...], data point
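+    Example: for t of shape (B,) and x of shape (B, C, H, W), returns t reshaped to (B, 1, 1, 1)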
+ """
+ dims = [1] * (len(x.size()) - 1)
+ t = t.view(t.size(0), *dims)
+ return t
+
+
+def build_mlp(hidden_size, projector_dim, z_dim):
+ return nn.Sequential(
+ nn.Linear(hidden_size, projector_dim),
+ nn.SiLU(),
+ nn.Linear(projector_dim, projector_dim),
+ nn.SiLU(),
+ nn.Linear(projector_dim, z_dim),
+ )
+
+def modulate(x, shift, scale):
+ return x * (1 + scale) + shift
+
+
+def get_parameter_dtype(parameter: torch.nn.Module):
+ try:
+ params = tuple(parameter.parameters())
+ if len(params) > 0:
+ return params[0].dtype
+
+ buffers = tuple(parameter.buffers())
+ if len(buffers) > 0:
+ return buffers[0].dtype
+
+ except StopIteration:
+ # For torch.nn.DataParallel compatibility in PyTorch 1.5
+
+ def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
+ tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
+ return tuples
+
+ gen = parameter._named_members(get_members_fn=find_tensor_attributes)
+ first_tuple = next(gen)
+ return first_tuple[1].dtype
\ No newline at end of file
diff --git a/tim/models/utils/norms.py b/tim/models/utils/norms.py
new file mode 100644
index 0000000000000000000000000000000000000000..d28bba485e90135634543fc53974354a816655cc
--- /dev/null
+++ b/tim/models/utils/norms.py
@@ -0,0 +1,403 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+
+from functools import partial
+
+import torch
+import torch.nn as nn
+
+import triton
+import triton.language as tl
+import torch.nn.functional as F
+
+
+def create_norm(norm_type: str, dim: int, eps: float = 1e-6):
+ """
+ Creates the specified normalization layer based on the norm_type.
+
+ Args:
+ norm_type (str): The type of normalization layer to create.
+            Supported types: layernorm, np_layernorm, np_layernorm_32, layernorm_32, rmsnorm, np_rmsnorm, fused_rmsnorm, fused_rmsnorm_32, none
+ dim (int): The dimension of the normalization layer.
+ eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+
+ Returns:
+        The created normalization layer; an unknown or empty norm_type falls back to nn.Identity().
+ """
+    if norm_type is None or norm_type == "":
+ return nn.Identity()
+ norm_type = norm_type.lower() # Normalize to lowercase
+
+ if norm_type == "layernorm":
+ return nn.LayerNorm(dim, eps=eps, bias=False)
+ elif norm_type == "np_layernorm":
+ return nn.LayerNorm(dim, eps=eps, elementwise_affine=False, bias=False)
+ elif norm_type == "np_layernorm_32":
+ return FP32_Layernorm(dim, eps=eps, elementwise_affine=False, bias=True)
+ elif norm_type == "layernorm_32":
+ return FP32_Layernorm(dim, eps=eps, bias=True)
+ elif norm_type == "rmsnorm":
+ return RMSNorm(dim, include_weight=True, eps=eps)
+ elif norm_type == "np_rmsnorm":
+        return RMSNorm(dim, include_weight=False, eps=eps)
+ elif norm_type == "fused_rmsnorm":
+ return FusedRMSNorm(dim, eps=1/65536)
+ elif norm_type == "fused_rmsnorm_32":
+ return FusedRMSNorm32(dim, eps=1e-6)
+ elif norm_type == 'none':
+ return nn.Identity()
+ else:
+ return nn.Identity()
+
+class FP32_Layernorm(nn.LayerNorm):
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ origin_dtype = inputs.dtype
+        if self.bias is None and self.weight is None:
+ return F.layer_norm(
+ input=inputs.float(),
+ normalized_shape=self.normalized_shape,
+ eps=self.eps
+ ).to(origin_dtype)
+        elif self.bias is None:
+ return F.layer_norm(
+ input=inputs.float(),
+ normalized_shape=self.normalized_shape,
+ weight=self.weight.float(),
+ eps=self.eps
+ ).to(origin_dtype)
+ else:
+ return F.layer_norm(
+ input=inputs.float(),
+ normalized_shape=self.normalized_shape,
+ weight=self.weight.float(),
+ bias=self.bias.float(),
+ eps=self.eps
+ ).to(origin_dtype)
+
+class FusedRMSNorm(nn.Module):
+ """Fused RMS Norm, wraps a fused Triton Kernel"""
+
+ def __init__(
+ self,
+ dim: int,
+ eps: float = 1e-6,
+ ):
+ super().__init__()
+ self.eps = eps
+ self.weight = nn.Parameter(torch.ones(dim))
+ self.fused_rms_norm_fn = fused_rms_norm_fn
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """leverages Triton Fused RMS Norm kernel"""
+ return self.fused_rms_norm_fn(
+ x,
+ self.weight,
+ eps=self.eps,
+ )
+
+ def reset_parameters(self):
+ torch.nn.init.ones_(self.weight) # type: ignore
+
+class FusedRMSNorm32(nn.Module):
+ """Fused RMS Norm, wraps a fused Triton Kernel"""
+
+ def __init__(
+ self,
+ dim: int,
+ eps: float = 1e-6,
+ ):
+ super().__init__()
+ self.eps = eps
+ self.weight = nn.Parameter(torch.ones(dim))
+ self.fused_rms_norm_fn = fused_rms_norm_fn
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """leverages Triton Fused RMS Norm kernel"""
+ dtype = x.dtype
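+        # run the fused kernel in float32 for numerical stability, then cast back to the input dtype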
+ return self.fused_rms_norm_fn(
+ x.to(torch.float32),
+ self.weight,
+ eps=self.eps,
+ ).to(dtype)
+
+ def reset_parameters(self):
+ torch.nn.init.ones_(self.weight) # type: ignore
+
+class RMSNorm(nn.Module):
+ def __init__(self, dim: int, include_weight: bool = True, eps: float = 1e-6, **block_kwargs):
+ """
+ Initialize the RMSNorm normalization layer.
+
+ Args:
+ dim (int): The dimension of the input tensor.
+            include_weight (bool): Whether to include a learnable weight in the normalization.
+ eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.
+
+ Attributes:
+ eps (float): A small value added to the denominator for numerical stability.
+ weight (nn.Parameter): Learnable scaling parameter.
+
+ """
+ super().__init__()
+ self.eps = eps
+ if include_weight:
+ self.weight = nn.Parameter(torch.ones(dim))
+ else:
+ self.weight = None
+
+ def _norm(self, x):
+ """
+ Apply the RMSNorm normalization to the input tensor.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+
+ Returns:
+ torch.Tensor: The normalized tensor.
+
+ """
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+ def forward(self, x):
+ """
+ Forward pass through the RMSNorm layer.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+
+ Returns:
+ torch.Tensor: The output tensor after applying RMSNorm.
+
+ """
+ output = self._norm(x.float()).type_as(x)
+        if self.weight is None:
+ return output
+ else:
+ return output * self.weight
+
+
+
+# FusedRMSNorm in Triton
+
+# Credit
+# Tri Dao's Triton LayerNorm: https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/triton/layer_norm.py
+# Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
+
+
+@triton.autotune(
+ configs=[
+ triton.Config({}, num_warps=1),
+ triton.Config({}, num_warps=2),
+ triton.Config({}, num_warps=4),
+ triton.Config({}, num_warps=8),
+ triton.Config({}, num_warps=16),
+ triton.Config({}, num_warps=32),
+ ],
+ key=["N"],
+)
+@triton.jit
+def _rms_norm_fwd_kernel(
+ X,
+ stride_x,
+ Y,
+ stride_y,
+ W,
+ Rstd,
+ eps,
+ M, # num rows
+ N, # num cols
+ block_N: tl.constexpr,
+):
+ row = tl.program_id(0)
+ cols = tl.arange(0, block_N)
+
+ # Load input data and weights
+ mask = cols < N
+ x = tl.load(X + row * stride_x + cols, mask=mask, other=0.0).to(tl.float32)
+ w = tl.load(W + cols, mask=mask, other=0.0).to(tl.float32)
+
+ # Compute mean and variance
+ xbar = tl.where(cols < N, x, 0.0)
+ var = tl.sum(xbar * xbar, axis=0) / N
+ rstd = 1 / tl.sqrt(var + eps)
+
+ # Store the reciprocal standard deviation
+ tl.store(Rstd + row, rstd)
+
+ # Normalize and apply linear transformation
+ x_hat = x * rstd
+ y = x_hat * w
+
+ # Write output
+ tl.store(Y + row * stride_y + cols, y, mask=mask)
+
+
+@triton.autotune(
+ configs=[
+ triton.Config({}, num_warps=1),
+ triton.Config({}, num_warps=2),
+ triton.Config({}, num_warps=4),
+ triton.Config({}, num_warps=8),
+ triton.Config({}, num_warps=16),
+ triton.Config({}, num_warps=32),
+ ],
+ key=["N"],
+)
+@triton.jit
+def _rms_norm_bwd_kernel_sm(
+ X,
+ stride_x,
+ W,
+ DY,
+ stride_dy,
+ DX,
+ stride_dx,
+ Rstd,
+ DW,
+ eps,
+ M, # num rows
+ N, # num cols
+ rows_per_program,
+ block_N: tl.constexpr,
+):
+ row_block_id = tl.program_id(0)
+ row_start = row_block_id * rows_per_program
+ cols = tl.arange(0, block_N)
+ mask = cols < N
+
+ # Load weights
+ w = tl.load(W + cols, mask=mask, other=0.0).to(tl.float32)
+
+ # Accumulate gradients for weights
+ dw = tl.zeros((block_N,), dtype=tl.float32)
+
+ row_end = min(row_start + rows_per_program, M)
+ for row in range(row_start, row_end):
+ # Load input, output gradient, and reciprocal standard deviation
+ x = tl.load(X + row * stride_x + cols, mask=mask, other=0.0).to(tl.float32)
+ dy = tl.load(DY + row * stride_dy + cols, mask=mask, other=0.0).to(tl.float32)
+ rstd = tl.load(Rstd + row)
+
+ # Compute normalized input and gradients
+ x_hat = x * rstd
+ wdy = w * dy
+ dw += dy * x_hat
+ c1 = tl.sum(x_hat * wdy, axis=0) / N
+ dx = (wdy - x_hat * c1) * rstd
+
+ # Store input gradient
+ tl.store(DX + row * stride_dx + cols, dx, mask=mask)
+
+ # Store weight gradients
+ tl.store(DW + row_block_id * N + cols, dw, mask=mask)
+
+
+class TritonFusedRMSNorm(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x, weight, eps):
+ x_shape_start = x.shape
+
+ # Flatten input
+ x = x.view(-1, x.shape[-1])
+ if x.stride(-1) != 1:
+ x = x.contiguous()
+ if weight.stride(-1) != 1:
+ weight = weight.contiguous()
+
+ M, N = x.shape
+ y = torch.empty_like(x)
+ rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
+
+ max_size = 65536 // x.element_size()
+ block_N = min(max_size, triton.next_power_of_2(N))
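+        # cap the block at 64 KiB worth of elements; each row must fit in a single Triton program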
+
+ if N > block_N:
+ raise ValueError(f"N {N} must be <= {block_N=}")
+
+ grid = lambda meta: (M,)
+ _rms_norm_fwd_kernel[grid](
+ x,
+ x.stride(0),
+ y,
+ y.stride(0),
+ weight,
+ rstd,
+ eps,
+ M,
+ N,
+ block_N,
+ )
+
+ ctx.eps = eps
+ ctx.save_for_backward(x, weight, rstd)
+ ctx.x_shape_start = x_shape_start
+
+ y = y.reshape(x_shape_start)
+ return y
+
+ @staticmethod
+ def backward(ctx, dy):
+ x, weight, rstd = ctx.saved_tensors
+ eps = ctx.eps
+ x_shape_start = ctx.x_shape_start
+
+ # Flatten input and output gradients
+ dy = dy.view(-1, dy.shape[-1])
+ if dy.stride(-1) != 1:
+ dy = dy.contiguous()
+
+ M, N = dy.shape
+ dx = torch.empty_like(x)
+ dw = torch.empty_like(weight)
+
+ sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count
+ _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device)
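+        # one program per SM accumulates partial weight gradients into its own row of _dw; they are summed below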
+
+ max_size = 65536 // x.element_size()
+ block_N = min(max_size, triton.next_power_of_2(N))
+ rows_per_sm = math.ceil(M / sm_count)
+
+ if N > block_N:
+ raise ValueError(f"N {N} must be <= {block_N=}")
+
+ grid = lambda meta: (sm_count,)
+ _rms_norm_bwd_kernel_sm[grid](
+ x,
+ x.stride(0),
+ weight,
+ dy,
+ dy.stride(0),
+ dx,
+ dx.stride(0),
+ rstd,
+ _dw,
+ eps,
+ M,
+ N,
+ rows_per_sm,
+ block_N,
+ )
+ dw = _dw.sum(0).to(weight.dtype)
+ dx = dx.view(x_shape_start)
+ return dx, dw, None
+
+
+# expose fusedRMSNorm as a function
+def fused_rms_norm_fn(
+ x,
+ weight,
+ eps=1e-6,
+):
+ return TritonFusedRMSNorm.apply(
+ x,
+ weight,
+ eps,
+ )
\ No newline at end of file
diff --git a/tim/models/utils/rope.py b/tim/models/utils/rope.py
new file mode 100644
index 0000000000000000000000000000000000000000..dad0a3e81b2c49712aa4869daeb35b8958899908
--- /dev/null
+++ b/tim/models/utils/rope.py
@@ -0,0 +1,305 @@
+# --------------------------------------------------------
+# FiT: A Flexible Vision Transformer for Image Generation
+#
+# Based on the following repository
+# https://github.com/lucidrains/rotary-embedding-torch
+# https://github.com/jquesnelle/yarn/blob/HEAD/scaled_rope
+# https://colab.research.google.com/drive/1VI2nhlyKvd5cw4-zHvAIk00cAVj2lCCC#scrollTo=b80b3f37
+# --------------------------------------------------------
+
+import math
+from math import pi
+from typing import Optional, Any, Union, Tuple
+import torch
+from torch import nn
+
+from einops import rearrange, repeat
+from functools import lru_cache
+
+#################################################################################
+# NTK Operations #
+#################################################################################
+
+def find_correction_factor(num_rotations, dim, base=10000, max_position_embeddings=2048):
+ return (dim * math.log(max_position_embeddings/(num_rotations * 2 * math.pi)))/(2 * math.log(base)) #Inverse dim formula to find number of rotations
+
+def find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=2048):
+ low = math.floor(find_correction_factor(low_rot, dim, base, max_position_embeddings))
+ high = math.ceil(find_correction_factor(high_rot, dim, base, max_position_embeddings))
+ return max(low, 0), min(high, dim-1) #Clamp values just in case
+
+def linear_ramp_mask(min, max, dim):
+ if min == max:
+ max += 0.001 #Prevent singularity
+
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
+ ramp_func = torch.clamp(linear_func, 0, 1)
+ return ramp_func
+
+def find_newbase_ntk(dim, base=10000, scale=1):
+ # Base change formula
+ return base * scale ** (dim / (dim-2))
+
+def get_mscale(scale: torch.Tensor):
+ # if scale <= 1:
+ # return 1.0
+ # return 0.1 * math.log(scale) + 1.0
+ return torch.where(scale <= 1., torch.tensor(1.0), 0.1 * torch.log(scale) + 1.0)
+
+def get_proportion(L_test, L_train):
+ L_test = L_test * 2
+ return torch.where(torch.tensor(L_test/L_train) <= 1., torch.tensor(1.0), torch.sqrt(torch.log(torch.tensor(L_test))/torch.log(torch.tensor(L_train))))
+ # return torch.sqrt(torch.log(torch.tensor(L_test))/torch.log(torch.tensor(L_train)))
+
+
+
+#################################################################################
+# Rotate Q or K #
+#################################################################################
+
+def rotate_half(x):
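+    # pair consecutive channels (x1, x2) and map them to (-x2, x1): the 90-degree rotation used by RoPE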
+ x = rearrange(x, '... (d r) -> ... d r', r = 2)
+ x1, x2 = x.unbind(dim = -1)
+ x = torch.stack((-x2, x1), dim = -1)
+ return rearrange(x, '... d r -> ... (d r)')
+
+
+
+#################################################################################
+# Core Vision RoPE #
+#################################################################################
+
+class VisionRotaryEmbedding(nn.Module):
+ def __init__(
+ self,
+ head_dim: int, # embed dimension for each head
+ custom_freqs: str = 'normal',
+ theta: int = 10000,
+ online_rope: bool = False,
+ max_cached_len: int = 1024,
+ max_pe_len_h: Optional[int] = None,
+ max_pe_len_w: Optional[int] = None,
+ decouple: bool = False,
+ ori_max_pe_len: Optional[int] = None,
+ ):
+ super().__init__()
+
+ dim = head_dim // 2
+        assert dim % 2 == 0  # actually, this is important
+ self.dim = dim
+ self.custom_freqs = custom_freqs.lower()
+ self.theta = theta
+ self.decouple = decouple
+ self.ori_max_pe_len = ori_max_pe_len
+
+ if not online_rope:
+ if self.custom_freqs in ['normal', 'scale1', 'scale2']:
+ freqs_h = 1. / (theta ** (torch.arange(0, dim, 2).float() / dim))
+ freqs_w = 1. / (theta ** (torch.arange(0, dim, 2).float() / dim))
+ else:
+ if decouple:
+ freqs_h = self.get_1d_rope_freqs(theta, dim, max_pe_len_h, ori_max_pe_len)
+ freqs_w = self.get_1d_rope_freqs(theta, dim, max_pe_len_w, ori_max_pe_len)
+ else:
+ max_pe_len = max(max_pe_len_h, max_pe_len_w)
+ freqs_h = self.get_1d_rope_freqs(theta, dim, max_pe_len, ori_max_pe_len)
+ freqs_w = self.get_1d_rope_freqs(theta, dim, max_pe_len, ori_max_pe_len)
+
+ self.register_buffer('freqs_h', freqs_h, persistent=False)
+ self.register_buffer('freqs_w', freqs_w, persistent=False)
+
+            if max_pe_len_h is not None and max_pe_len_w is not None and ori_max_pe_len is not None:
+ attn_factor = 1.0
+ scale = torch.clamp_min(torch.tensor(max(max_pe_len_h, max_pe_len_w)) / ori_max_pe_len, 1.0) # dynamic scale
+ self.mscale = get_mscale(scale).to(scale) * attn_factor # Get n-d magnitude scaling corrected for interpolation
+ self.proportion1 = get_proportion(max(max_pe_len_h, max_pe_len_w), ori_max_pe_len)
+ self.proportion2 = get_proportion(max_pe_len_h * max_pe_len_w, ori_max_pe_len ** 2)
+
+
+ freqs_h_cached = torch.einsum('..., f -> ... f', torch.arange(max_cached_len), self.freqs_h)
+ freqs_h_cached = repeat(freqs_h_cached, '... n -> ... (n r)', r = 2)
+ self.register_buffer('freqs_h_cached', freqs_h_cached, persistent=False)
+ freqs_w_cached = torch.einsum('..., f -> ... f', torch.arange(max_cached_len), self.freqs_w)
+ freqs_w_cached = repeat(freqs_w_cached, '... n -> ... (n r)', r = 2)
+ self.register_buffer('freqs_w_cached', freqs_w_cached, persistent=False)
+
+
+ def get_1d_rope_freqs(self, theta, dim, max_pe_len, ori_max_pe_len):
+ # scaling operations for extrapolation
+ assert isinstance(ori_max_pe_len, int)
+ # scale = max_pe_len / ori_max_pe_len
+ if not isinstance(max_pe_len, torch.Tensor):
+ max_pe_len = torch.tensor(max_pe_len)
+ scale = torch.clamp_min(max_pe_len / ori_max_pe_len, 1.0) # dynamic scale
+
+ if self.custom_freqs == 'linear': # equal to position interpolation
+ freqs = 1. / torch.einsum('..., f -> ... f', scale, theta ** (torch.arange(0, dim, 2).float() / dim))
+ elif self.custom_freqs == 'ntk-aware' or self.custom_freqs == 'ntk-aware-pro1' or self.custom_freqs == 'ntk-aware-pro2':
+ freqs = 1. / torch.pow(
+ find_newbase_ntk(dim, theta, scale).view(-1, 1),
+ (torch.arange(0, dim, 2).to(scale).float() / dim)
+ ).squeeze()
+ elif self.custom_freqs == 'ntk-by-parts':
+ #Interpolation constants found experimentally for LLaMA (might not be totally optimal though)
+ #Do not change unless there is a good reason for doing so!
+ beta_0 = 1.25
+ beta_1 = 0.75
+ gamma_0 = 16
+ gamma_1 = 2
+ ntk_factor = 1
+ extrapolation_factor = 1
+
+ #Three RoPE extrapolation/interpolation methods
+ freqs_base = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
+ freqs_linear = 1.0 / torch.einsum('..., f -> ... f', scale, (theta ** (torch.arange(0, dim, 2).to(scale).float() / dim)))
+ freqs_ntk = 1. / torch.pow(
+ find_newbase_ntk(dim, theta, scale).view(-1, 1),
+ (torch.arange(0, dim, 2).to(scale).float() / dim)
+ ).squeeze()
+
+ #Combine NTK and Linear
+ low, high = find_correction_range(beta_0, beta_1, dim, theta, ori_max_pe_len)
+ freqs_mask = (1 - linear_ramp_mask(low, high, dim // 2).to(scale)) * ntk_factor
+ freqs = freqs_linear * (1 - freqs_mask) + freqs_ntk * freqs_mask
+
+ #Combine Extrapolation and NTK and Linear
+ low, high = find_correction_range(gamma_0, gamma_1, dim, theta, ori_max_pe_len)
+ freqs_mask = (1 - linear_ramp_mask(low, high, dim // 2).to(scale)) * extrapolation_factor
+ freqs = freqs * (1 - freqs_mask) + freqs_base * freqs_mask
+
+ elif self.custom_freqs == 'yarn':
+ #Interpolation constants found experimentally for LLaMA (might not be totally optimal though)
+ #Do not change unless there is a good reason for doing so!
+ beta_fast = 32
+ beta_slow = 1
+ extrapolation_factor = 1
+
+ freqs_extrapolation = 1.0 / (theta ** (torch.arange(0, dim, 2).to(scale).float() / dim))
+ freqs_interpolation = 1.0 / torch.einsum('..., f -> ... f', scale, (theta ** (torch.arange(0, dim, 2).to(scale).float() / dim)))
+
+ low, high = find_correction_range(beta_fast, beta_slow, dim, theta, ori_max_pe_len)
+ freqs_mask = (1 - linear_ramp_mask(low, high, dim // 2).to(scale).float()) * extrapolation_factor # Get n-d rotational scaling corrected for extrapolation
+ freqs = freqs_interpolation * (1 - freqs_mask) + freqs_extrapolation * freqs_mask
+ else:
+ raise ValueError(f'Unknown modality {self.custom_freqs}. Only support normal, linear, ntk-aware, ntk-by-parts, yarn!')
+ return freqs
+
+
+ def online_get_2d_rope_from_grid(self, grid, size):
+ '''
+ grid: (B, 2, N)
+ N = H * W
+        the first dimension represents width, and the second represents height
+ e.g., [0. 1. 2. 3. 0. 1. 2. 3. 0. 1. 2. 3.]
+ [0. 0. 0. 0. 1. 1. 1. 1. 2. 2. 2. 2.]
+ size: (B, 1, 2), h goes first and w goes last
+ '''
+ size = size.squeeze() # (B, 1, 2) -> (B, 2)
+ if self.decouple:
+ size_h = size[:, 0]
+ size_w = size[:, 1]
+ freqs_h = self.get_1d_rope_freqs(self.theta, self.dim, size_h, self.ori_max_pe_len)
+ freqs_w = self.get_1d_rope_freqs(self.theta, self.dim, size_w, self.ori_max_pe_len)
+ else:
+ size_max = torch.max(size[:, 0], size[:, 1])
+ freqs_h = self.get_1d_rope_freqs(self.theta, self.dim, size_max, self.ori_max_pe_len)
+ freqs_w = self.get_1d_rope_freqs(self.theta, self.dim, size_max, self.ori_max_pe_len)
+ freqs_w = grid[:, 0][..., None] * freqs_w[:, None, :]
+ freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)
+
+ freqs_h = grid[:, 1][..., None] * freqs_h[:, None, :]
+ freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)
+
+ freqs = torch.cat([freqs_h, freqs_w], dim=-1) # (B, N, D)
+
+ if self.custom_freqs == 'yarn':
+ freqs_cos = freqs.cos() * self.mscale[:, None, None]
+ freqs_sin = freqs.sin() * self.mscale[:, None, None]
+ elif self.custom_freqs == 'ntk-aware-pro1':
+ freqs_cos = freqs.cos() * self.proportion1[:, None, None]
+ freqs_sin = freqs.sin() * self.proportion1[:, None, None]
+ elif self.custom_freqs == 'ntk-aware-pro2':
+ freqs_cos = freqs.cos() * self.proportion2[:, None, None]
+ freqs_sin = freqs.sin() * self.proportion2[:, None, None]
+ else:
+ freqs_cos = freqs.cos()
+ freqs_sin = freqs.sin()
+
+ return freqs_cos, freqs_sin
+
+ @lru_cache()
+ def get_2d_rope_from_grid(self, grid):
+ '''
+ grid: (B, 2, N)
+ N = H * W
+        the first dimension represents width, and the second represents height
+ e.g., [0. 1. 2. 3. 0. 1. 2. 3. 0. 1. 2. 3.]
+ [0. 0. 0. 0. 1. 1. 1. 1. 2. 2. 2. 2.]
+ '''
+ freqs_h = torch.einsum('..., f -> ... f', grid[:, 0], self.freqs_h)
+ freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)
+ freqs_w = torch.einsum('..., f -> ... f', grid[:, 1], self.freqs_w)
+ freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)
+
+ freqs = torch.cat([freqs_h, freqs_w], dim=-1) # (B, N, D)
+
+ if self.custom_freqs == 'yarn':
+ freqs_cos = freqs.cos() * self.mscale
+ freqs_sin = freqs.sin() * self.mscale
+ elif self.custom_freqs in ['ntk-aware-pro1', 'scale1']:
+ freqs_cos = freqs.cos() * self.proportion1
+ freqs_sin = freqs.sin() * self.proportion1
+ elif self.custom_freqs in ['ntk-aware-pro2', 'scale2']:
+ freqs_cos = freqs.cos() * self.proportion2
+ freqs_sin = freqs.sin() * self.proportion2
+ else:
+ freqs_cos = freqs.cos()
+ freqs_sin = freqs.sin()
+
+ return freqs_cos, freqs_sin
+
+ @lru_cache()
+ def get_cached_2d_rope_from_grid(self, grid: torch.Tensor):
+ '''
+ grid: (B, 2, N)
+ N = H * W
+        the first dimension represents width, and the second represents height
+ e.g., [0. 1. 2. 3. 0. 1. 2. 3. 0. 1. 2. 3.]
+ [0. 0. 0. 0. 1. 1. 1. 1. 2. 2. 2. 2.]
+ '''
+ if len(grid.shape) == 3: # (B, 2, N)
+ freqs_h, freqs_w = self.freqs_h_cached[grid[:, 0]], self.freqs_w_cached[grid[:, 1]]
+ elif len(grid.shape) == 2: # (2, N)
+ freqs_h, freqs_w = self.freqs_h_cached[grid[0]], self.freqs_w_cached[grid[1]]
+ freqs = torch.cat([freqs_h, freqs_w], dim=-1) # (B, N, D)
+
+ if self.custom_freqs == 'yarn':
+ freqs_cos = freqs.cos() * self.mscale
+ freqs_sin = freqs.sin() * self.mscale
+ elif self.custom_freqs in ['ntk-aware-pro1', 'scale1']:
+ freqs_cos = freqs.cos() * self.proportion1
+ freqs_sin = freqs.sin() * self.proportion1
+ elif self.custom_freqs in ['ntk-aware-pro2', 'scale2']:
+ freqs_cos = freqs.cos() * self.proportion2
+ freqs_sin = freqs.sin() * self.proportion2
+ else:
+ freqs_cos = freqs.cos()
+ freqs_sin = freqs.sin()
+
+ return freqs_cos, freqs_sin
+
+
+ def forward(self, x, grid):
+ '''
+ x: (B, n_head, N, D)
+ grid: (B, 2, N)
+ '''
+ # freqs_cos, freqs_sin = self.get_2d_rope_from_grid(grid)
+ # freqs_cos, freqs_sin = freqs_cos.unsqueeze(1), freqs_sin.unsqueeze(1)
+ # using cache to accelerate, this is the same with the above codes:
+ freqs_cos, freqs_sin = self.get_cached_2d_rope_from_grid(grid)
+ freqs_cos, freqs_sin = freqs_cos.unsqueeze(1), freqs_sin.unsqueeze(1)
+ return x * freqs_cos + rotate_half(x) * freqs_sin
+
+
\ No newline at end of file
diff --git a/tim/models/utils/text_encoders.py b/tim/models/utils/text_encoders.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c74bb0aab40095f1cc5ed48678b6327a22537cd
--- /dev/null
+++ b/tim/models/utils/text_encoders.py
@@ -0,0 +1,63 @@
+import os
+import torch
+from transformers import T5EncoderModel, AutoModelForCausalLM, AutoTokenizer
+
+
+# load text-encoder
+def load_text_encoder(text_encoder_dir, device, weight_dtype):
+ os.environ["TOKENIZERS_PARALLELISM"] = "true"
+ tokenizer = AutoTokenizer.from_pretrained(text_encoder_dir)
+ if "gemma" in text_encoder_dir:
+ tokenizer.padding_side = "right"
+ text_encoder = AutoModelForCausalLM.from_pretrained(
+ text_encoder_dir,
+ attn_implementation="flash_attention_2",
+ device_map="cpu",
+ torch_dtype=weight_dtype,
+ ).model
+ elif "t5" in text_encoder_dir:
+ text_encoder = T5EncoderModel.from_pretrained(
+ text_encoder_dir,
+ attn_implementation="sdpa",
+ device_map="cpu",
+ torch_dtype=weight_dtype,
+ )
+ else:
+ raise NotImplementedError
+ text_encoder.requires_grad_(False)
+ text_encoder = text_encoder.eval().to(device=device, dtype=weight_dtype)
+
+ return text_encoder, tokenizer
+
+
+def encode_prompt(
+ tokenizer,
+ text_encoder,
+ device,
+ weight_dtype,
+ captions,
+ use_last_hidden_state,
+ max_seq_length=256,
+):
+ text_inputs = tokenizer(
+ captions,
+ padding="max_length",
+ max_length=max_seq_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids.to(device)
+ prompt_masks = text_inputs.attention_mask.to(device)
+ with torch.no_grad(), torch.autocast("cuda", dtype=weight_dtype):
+ results = text_encoder(
+ input_ids=text_input_ids,
+ attention_mask=prompt_masks,
+ output_hidden_states=True,
+ )
+
+ if use_last_hidden_state:
+ prompt_embeds = results.last_hidden_state
+ else: # from Imagen paper
+ prompt_embeds = results.hidden_states[-2]
+
+ return prompt_embeds, prompt_masks
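+
+
+# Example usage (a sketch; the checkpoint path and dtype below are assumptions, adjust them to your setup):
+#   text_encoder, tokenizer = load_text_encoder("google/gemma-2-2b", "cuda", torch.bfloat16)
+#   prompt_embeds, prompt_masks = encode_prompt(
+#       tokenizer, text_encoder, "cuda", torch.bfloat16,
+#       ["a photo of a cat"], use_last_hidden_state=False,
+#   )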
diff --git a/tim/models/vae/__init__.py b/tim/models/vae/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c77422771cd181cb32e720fbda1c900e37a7fd6
--- /dev/null
+++ b/tim/models/vae/__init__.py
@@ -0,0 +1,55 @@
+import torch
+from .dc_ae import MyAutoencoderDC as AutoencoderDC
+from .sd_vae import MyAutoencoderKL as AutoencoderKL
+
+# dc-ae
+def get_dc_ae(vae_dir, dtype, device):
+ dc_ae = AutoencoderDC.from_pretrained(vae_dir).to(dtype=dtype, device=device)
+ dc_ae.eval()
+ dc_ae.requires_grad_(False)
+ return dc_ae
+
+
+def dc_ae_encode(dc_ae, images):
+ with torch.no_grad():
+ z = dc_ae.encode(images).latent
+ latents = (z - dc_ae.mean) / dc_ae.std
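+        # mean/std are the running BatchNorm statistics tracked by the autoencoder (see MyAutoencoderDC.mean/std)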
+ return latents
+
+def dc_ae_decode(dc_ae, latents, slice_vae=False):
+ with torch.no_grad():
+ z = latents * dc_ae.std + dc_ae.mean
+ if slice_vae and z.size(0) > 1:
+ decoded_slices = [dc_ae._decode(z_slice) for z_slice in z.split(1)]
+ decoded = torch.cat(decoded_slices)
+ else:
+ decoded = dc_ae._decode(z)
+ images = decoded # decoded images
+ return images
+
+# sd-vae
+def get_sd_vae(vae_dir, dtype, device):
+ sd_vae = AutoencoderKL.from_pretrained(vae_dir).to(dtype=dtype, device=device)
+ sd_vae.eval()
+ sd_vae.requires_grad_(False)
+ return sd_vae
+
+def sd_vae_encode(sd_vae, images):
+ with torch.no_grad():
+ posterior = sd_vae.encode(images)
+ z = posterior.latent_dist.sample()
+ latents = (z - sd_vae.mean) / sd_vae.std
+ return latents
+
+def sd_vae_decode(sd_vae, latents, slice_vae=False):
+ with torch.no_grad():
+ z = latents * sd_vae.std + sd_vae.mean
+ if slice_vae and z.shape[0] > 1:
+ decoded_slices = [sd_vae._decode(z_slice).sample for z_slice in z.split(1)]
+ decoded = torch.cat(decoded_slices)
+ else:
+ decoded = sd_vae._decode(z).sample
+ return decoded
+
+
+
diff --git a/tim/models/vae/dc_ae.py b/tim/models/vae/dc_ae.py
new file mode 100644
index 0000000000000000000000000000000000000000..dab4ef4c0be13124ddaa8daf9a5a344bb56b8b76
--- /dev/null
+++ b/tim/models/vae/dc_ae.py
@@ -0,0 +1,178 @@
+import torch
+from torch.utils.checkpoint import checkpoint
+from diffusers.models.autoencoders.autoencoder_dc import Encoder, Decoder, AutoencoderDC
+
+
+class MyEncoder(Encoder):
+ def __init__(
+ self,
+ in_channels,
+ latent_channels,
+ attention_head_dim = 32,
+ block_type = "ResBlock",
+ block_out_channels = ...,
+ layers_per_block = ...,
+ qkv_multiscales = ...,
+ downsample_block_type = "pixel_unshuffle",
+ out_shortcut = True
+ ):
+ super().__init__(
+ in_channels, latent_channels, attention_head_dim, block_type, block_out_channels,
+ layers_per_block, qkv_multiscales, downsample_block_type, out_shortcut
+ )
+
+ def forward(self, hidden_states: torch.Tensor, use_checkpoint=False) -> torch.Tensor:
+ hidden_states = self.conv_in(hidden_states)
+ for down_block in self.down_blocks:
+ if use_checkpoint:
+ hidden_states = checkpoint(self.ckpt_wrapper(down_block), hidden_states)
+ else:
+ hidden_states = down_block(hidden_states)
+
+ if self.out_shortcut:
+ x = hidden_states.unflatten(1, (-1, self.out_shortcut_average_group_size))
+ x = x.mean(dim=2)
+ hidden_states = self.conv_out(hidden_states) + x
+ else:
+ hidden_states = self.conv_out(hidden_states)
+
+ return hidden_states
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+
+
+class MyDecoder(Decoder):
+ def __init__(
+ self,
+ in_channels,
+ latent_channels,
+ attention_head_dim = 32,
+ block_type = "ResBlock",
+ block_out_channels = ...,
+ layers_per_block = ...,
+ qkv_multiscales = ...,
+ norm_type = "rms_norm",
+ act_fn = "silu",
+ upsample_block_type = "pixel_shuffle",
+ in_shortcut = True
+ ):
+ super().__init__(
+ in_channels, latent_channels, attention_head_dim, block_type, block_out_channels,
+ layers_per_block, qkv_multiscales, norm_type, act_fn, upsample_block_type, in_shortcut
+ )
+
+ def forward(self, hidden_states: torch.Tensor, use_checkpoint=False) -> torch.Tensor:
+ if self.in_shortcut:
+ x = hidden_states.repeat_interleave(
+ self.in_shortcut_repeats, dim=1, output_size=hidden_states.shape[1] * self.in_shortcut_repeats
+ )
+ hidden_states = self.conv_in(hidden_states) + x
+ else:
+ hidden_states = self.conv_in(hidden_states)
+
+ for up_block in reversed(self.up_blocks):
+ if use_checkpoint:
+ hidden_states = checkpoint(self.ckpt_wrapper(up_block), hidden_states)
+ else:
+ hidden_states = up_block(hidden_states)
+
+ hidden_states = self.norm_out(hidden_states.movedim(1, -1)).movedim(-1, 1)
+ hidden_states = self.conv_act(hidden_states)
+ hidden_states = self.conv_out(hidden_states)
+ return hidden_states
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+
+class MyAutoencoderDC(AutoencoderDC):
+ def __init__(
+ self,
+ in_channels = 3,
+ latent_channels = 32,
+ attention_head_dim = 32,
+ encoder_block_types = "ResBlock",
+ decoder_block_types = "ResBlock",
+ encoder_block_out_channels = ...,
+ decoder_block_out_channels = ...,
+ encoder_layers_per_block = ...,
+ decoder_layers_per_block = ...,
+ encoder_qkv_multiscales = ...,
+ decoder_qkv_multiscales = ...,
+ upsample_block_type = "pixel_shuffle",
+ downsample_block_type = "pixel_unshuffle",
+ decoder_norm_types = "rms_norm",
+ decoder_act_fns = "silu",
+ scaling_factor = 1,
+ bn_momentum = 0.1,
+ ):
+ super().__init__(
+ in_channels, latent_channels, attention_head_dim, encoder_block_types,
+ decoder_block_types, encoder_block_out_channels, decoder_block_out_channels,
+ encoder_layers_per_block, decoder_layers_per_block, encoder_qkv_multiscales,
+ decoder_qkv_multiscales, upsample_block_type, downsample_block_type,
+ decoder_norm_types, decoder_act_fns, scaling_factor
+ )
+
+ self.encoder = MyEncoder(
+ in_channels=in_channels,
+ latent_channels=latent_channels,
+ attention_head_dim=attention_head_dim,
+ block_type=encoder_block_types,
+ block_out_channels=encoder_block_out_channels,
+ layers_per_block=encoder_layers_per_block,
+ qkv_multiscales=encoder_qkv_multiscales,
+ downsample_block_type=downsample_block_type,
+ )
+ self.decoder = MyDecoder(
+ in_channels=in_channels,
+ latent_channels=latent_channels,
+ attention_head_dim=attention_head_dim,
+ block_type=decoder_block_types,
+ block_out_channels=decoder_block_out_channels,
+ layers_per_block=decoder_layers_per_block,
+ qkv_multiscales=decoder_qkv_multiscales,
+ norm_type=decoder_norm_types,
+ act_fn=decoder_act_fns,
+ upsample_block_type=upsample_block_type,
+ )
+ self.bn = torch.nn.BatchNorm2d(
+ latent_channels, eps=1e-4, momentum=bn_momentum, affine=False, track_running_stats=True
+ )
+ self.bn.reset_running_stats()
+ self.init_bn()
+
+
+ def init_bn(self):
+ # self.bn.running_mean = torch.zeros_like(self.bn.running_mean).to(torch.float64)
+ # self.bn.running_var = torch.ones_like(self.bn.running_var).to(torch.float64) / self.config.scaling_factor ** 2
+ self.bn.running_mean = torch.zeros_like(self.bn.running_mean)
+ self.bn.running_var = torch.ones_like(self.bn.running_var) / self.config.scaling_factor ** 2
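+        # at init, std = 1 / scaling_factor, so (z - mean) / std reproduces the usual scaling_factor multiplication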
+ print(self.config.scaling_factor, self.bn.running_var.flatten())
+
+ @property
+ def mean(self):
+ mean = self.bn.running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+ return mean
+
+ @property
+ def std(self):
+ std = self.bn.running_var.sqrt().unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+ return std
+
+ def forward(self, x: torch.Tensor, use_checkpoint=False) -> torch.Tensor:
+ z = self.encoder(x, use_checkpoint)
+ latent = self.bn(z)
+ recon = self.decoder(z, use_checkpoint)
+ posterior = None
+ return posterior, latent, recon
diff --git a/tim/models/vae/sd_vae.py b/tim/models/vae/sd_vae.py
new file mode 100644
index 0000000000000000000000000000000000000000..1630d9d24d1c1ba57d3197b6c5b03244b373e885
--- /dev/null
+++ b/tim/models/vae/sd_vae.py
@@ -0,0 +1,206 @@
+import torch
+from torch.utils.checkpoint import checkpoint
+from diffusers.models.autoencoders.autoencoder_kl import Encoder, Decoder, AutoencoderKL
+from typing import Optional
+
+class MyEncoder(Encoder):
+ def __init__(
+ self,
+ in_channels = 3,
+ out_channels = 3,
+ down_block_types = ...,
+ block_out_channels = ...,
+ layers_per_block = 2,
+ norm_num_groups = 32,
+ act_fn = "silu",
+ double_z = True,
+ mid_block_add_attention=True
+ ):
+ super().__init__(
+ in_channels, out_channels, down_block_types, block_out_channels,
+ layers_per_block, norm_num_groups, act_fn, double_z, mid_block_add_attention
+ )
+
+ def forward(self, sample: torch.Tensor) -> torch.Tensor:
+ r"""The forward method of the `Encoder` class."""
+
+ sample = self.conv_in(sample)
+
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
+ # down
+ for down_block in self.down_blocks:
+ sample = checkpoint(self.ckpt_wrapper(down_block), sample)
+ # middle
+ sample = checkpoint(self.ckpt_wrapper(self.mid_block), sample)
+
+ else:
+ # down
+ for down_block in self.down_blocks:
+ sample = down_block(sample)
+
+ # middle
+ sample = self.mid_block(sample)
+
+ # post-process
+ sample = self.conv_norm_out(sample)
+ sample = self.conv_act(sample)
+ sample = self.conv_out(sample)
+
+ return sample
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+class MyDecoder(Decoder):
+ def __init__(
+ self,
+ in_channels = 3,
+ out_channels = 3,
+ up_block_types = ...,
+ block_out_channels = ...,
+ layers_per_block = 2,
+ norm_num_groups = 32,
+ act_fn = "silu",
+ norm_type = "group",
+ mid_block_add_attention=True
+ ):
+ super().__init__(
+ in_channels, out_channels, up_block_types, block_out_channels,
+ layers_per_block, norm_num_groups, act_fn, norm_type, mid_block_add_attention
+ )
+
+ def forward(
+ self,
+ sample: torch.Tensor,
+ latent_embeds: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ r"""The forward method of the `Decoder` class."""
+
+ sample = self.conv_in(sample)
+
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
+ # middle
+ sample = checkpoint(self.ckpt_wrapper(self.mid_block), sample, latent_embeds)
+ sample = sample.to(upscale_dtype)
+
+ # up
+ for up_block in self.up_blocks:
+ sample = checkpoint(self.ckpt_wrapper(up_block), sample, latent_embeds)
+ else:
+ # middle
+ sample = self.mid_block(sample, latent_embeds)
+ sample = sample.to(upscale_dtype)
+
+ # up
+ for up_block in self.up_blocks:
+ sample = up_block(sample, latent_embeds)
+
+ # post-process
+ if latent_embeds is None:
+ sample = self.conv_norm_out(sample)
+ else:
+ sample = self.conv_norm_out(sample, latent_embeds)
+ sample = self.conv_act(sample)
+ sample = self.conv_out(sample)
+
+ return sample
+
+ def ckpt_wrapper(self, module):
+ def ckpt_forward(*inputs):
+ outputs = module(*inputs)
+ return outputs
+ return ckpt_forward
+
+
+class MyAutoencoderKL(AutoencoderKL):
+ def __init__(
+ self,
+ in_channels = 3,
+ out_channels = 3,
+ down_block_types = ...,
+ up_block_types = ...,
+ block_out_channels = ...,
+ layers_per_block = 1,
+ act_fn = "silu",
+ latent_channels = 4,
+ norm_num_groups = 32,
+ sample_size = 32,
+ scaling_factor = 0.18215,
+ shift_factor = None,
+ latents_mean = None,
+ latents_std = None,
+ force_upcast = True,
+ use_quant_conv = True,
+ use_post_quant_conv = True,
+ mid_block_add_attention = True,
+ bn_momentum = 0.1,
+ ):
+ super().__init__(
+ in_channels, out_channels, down_block_types, up_block_types, block_out_channels,
+ layers_per_block, act_fn, latent_channels, norm_num_groups, sample_size,
+ scaling_factor, shift_factor, latents_mean, latents_std, force_upcast,
+ use_quant_conv, use_post_quant_conv, mid_block_add_attention
+ )
+ self.encoder = MyEncoder(
+ in_channels=in_channels,
+ out_channels=latent_channels,
+ down_block_types=down_block_types,
+ block_out_channels=block_out_channels,
+ layers_per_block=layers_per_block,
+ act_fn=act_fn,
+ norm_num_groups=norm_num_groups,
+ double_z=True,
+ mid_block_add_attention=mid_block_add_attention,
+ )
+
+ # pass init params to Decoder
+ self.decoder = MyDecoder(
+ in_channels=latent_channels,
+ out_channels=out_channels,
+ up_block_types=up_block_types,
+ block_out_channels=block_out_channels,
+ layers_per_block=layers_per_block,
+ norm_num_groups=norm_num_groups,
+ act_fn=act_fn,
+ mid_block_add_attention=mid_block_add_attention,
+ )
+ self.bn = torch.nn.BatchNorm2d(
+ latent_channels, eps=1e-4, momentum=bn_momentum, affine=False, track_running_stats=True
+ )
+ self.bn.reset_running_stats()
+ self.init_bn()
+
+
+ def init_bn(self):
+ # self.bn.running_mean = torch.zeros_like(self.bn.running_mean).to(torch.float64)
+ # self.bn.running_var = torch.ones_like(self.bn.running_var).to(torch.float64) / self.config.scaling_factor ** 2
+ self.bn.running_mean = torch.zeros_like(self.bn.running_mean)
+ self.bn.running_var = torch.ones_like(self.bn.running_var) / self.config.scaling_factor ** 2
+
+ @property
+ def mean(self):
+ mean = self.bn.running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+ return mean
+
+ @property
+ def std(self):
+ std = self.bn.running_var.sqrt().unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+ return std
+
+
+ def forward(self, x: torch.Tensor, use_checkpoint=False):
+ self.encoder.gradient_checkpointing = use_checkpoint
+ self.decoder.gradient_checkpointing = use_checkpoint
+ posterior = self.encode(x).latent_dist
+ z = posterior.sample()
+ latent = self.bn(z)
+ recon = self.decode(z).sample
+ return posterior, latent, recon
+
+
+
diff --git a/tim/schedulers/transition.py b/tim/schedulers/transition.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eb9872ebe6c8e89d5582c10dda96186a7fc9721
--- /dev/null
+++ b/tim/schedulers/transition.py
@@ -0,0 +1,329 @@
+from typing import Callable
+import torch
+import torch.nn.functional as F
+from copy import deepcopy
+from .transports import Transport
+from tim.models.utils.funcs import expand_t_like_x
+
+
+def mean_flat(x):
+ """
+ Take the mean over all non-batch dimensions.
+ """
+ return torch.mean(x, dim=list(range(1, len(x.size()))))
+
+
+def sum_flat(x):
+ """
+    Take the sum over all non-batch dimensions.
+ """
+ return torch.sum(x, dim=list(range(1, len(x.size()))))
+
+
+class TransitionSchedule:
+ def __init__(
+ self,
+ transport: Transport,
+ diffusion_ratio: float = 0.0,
+ consistency_ratio: float = 0.0,
+ derivative_type: str = "dde",
+ differential_epsilon: float = 0.005,
+ weight_t_and_r: bool = True,
+ weight_time_type: str = "constant",
+ weight_time_tangent: bool = False,
+ weight_time_sigmoid: bool = False,
+ ):
+ self.transport = transport
+ self.diffusion_ratio = diffusion_ratio
+ self.consistency_ratio = consistency_ratio
+ self.derivative_type = derivative_type
+ self.differential_epsilon = differential_epsilon
+ self.weight_t_and_r = weight_t_and_r
+ self.weight_time_type = weight_time_type
+ self.weight_time_tangent = weight_time_tangent
+ self.weight_time_sigmoid = weight_time_sigmoid
+
+ def sample_t_and_r(self, batch_size, dtype, device):
+ t_1 = self.transport.sample_t(batch_size=batch_size, dtype=dtype, device=device)
+ t_2 = self.transport.sample_t(batch_size=batch_size, dtype=dtype, device=device)
+ # t is the larger one, and r is the smaller one
+ t = torch.maximum(t_1, t_2)
+ r = torch.minimum(t_1, t_2)
+ # some samples with t=r, corresponding to diffusion training
+ n_diffusion = round(self.diffusion_ratio * len(t))
+ r[:n_diffusion] = t[:n_diffusion]
+ # some samples with r=0, corresponding to consistency training
+ n_consistency = round(self.consistency_ratio * len(t))
+ if n_consistency != 0:
+ r[-n_consistency:] = self.transport.T_min
+ return t, r, n_diffusion
+
+ def prepare_input(self, batch_size, x, z):
+ # sample timestep according to log-normal distribution of sigmas following EDM
+ t, r, n_diffusion = self.sample_t_and_r(
+ batch_size=batch_size, dtype=x.dtype, device=x.device
+ )
+ # reshape (B, ) -> (B, 1, 1, 1)
+ t, r = expand_t_like_x(t, x), expand_t_like_x(r, x)
+        # prepare inputs
+ alpha_t, sigma_t, d_alpha_t, d_sigma_t = self.transport.interpolant(t)
+ x_t = alpha_t * x + sigma_t * z
+ v_t = d_alpha_t * x + d_sigma_t * z
+ return x_t, v_t, t, r, n_diffusion
+
+ def model_forward(self, model, x_t, t, r, model_kwargs, rng_state):
+ # model_input
+ t_input = self.transport.c_noise(t.flatten())
+ r_input = self.transport.c_noise(r.flatten())
+ # model_output
+ torch.cuda.set_rng_state(rng_state)
+ model_output = model(x_t, t_input, r_input, **model_kwargs)
+ return model_output
+
+ @torch.no_grad()
+ def jvp_derivative(
+ self, model, x_t, v_t, t, r, model_kwargs, rng_state, n_diffusion
+ ):
+ if n_diffusion == x_t.size(0):
+ return 0
+ _dF_dv_dt = torch.zeros_like(x_t)
+ # only calculate the dF_dv_dt when t!=r
+ x_t, v_t, t, r = (
+ x_t[n_diffusion:],
+ v_t[n_diffusion:],
+ t[n_diffusion:],
+ r[n_diffusion:],
+ )
+ for k, v in model_kwargs.items():
+ if type(v) == torch.Tensor:
+ model_kwargs[k] = model_kwargs[k][n_diffusion:]
+ model_kwargs["return_zs"] = False
+
+ def model_jvp(x_t, t, r):
+ model_kwargs["attn_type"] = "vanilla_attn"
+ model_kwargs["jvp"] = True
+ t_input = self.transport.c_noise(t.flatten())
+ r_input = self.transport.c_noise(r.flatten())
+ return model(x_t, t_input, r_input, **model_kwargs)
+
+ torch.cuda.set_rng_state(rng_state)
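+        # tangents (v_t, 1, 0): differentiate F along the trajectory x_t with respect to t, holding r fixed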
+ F_pred, dF_dv_dt = torch.func.jvp(
+ lambda x_t, t, r: model_jvp(x_t, t, r),
+ (x_t, t, r),
+ (v_t, torch.ones_like(t), torch.zeros_like(r)),
+ )
+ _dF_dv_dt[n_diffusion:] = dF_dv_dt
+ return _dF_dv_dt
+
+ @torch.no_grad()
+ def dde_derivative(self, model, x, z, t, r, model_kwargs, rng_state, n_diffusion):
+ if n_diffusion == x.size(0):
+ return 0
+ _dF_dv_dt = torch.zeros_like(x)
+ # only calculate the dF_dv_dt when t!=r
+ x, z, t, r = x[n_diffusion:], z[n_diffusion:], t[n_diffusion:], r[n_diffusion:]
+ for k, v in model_kwargs.items():
+ if type(v) == torch.Tensor:
+ model_kwargs[k] = model_kwargs[k][n_diffusion:]
+ model_kwargs["return_zs"] = False
+ model_kwargs["jvp"] = True
+
+ def xfunc(t):
+ alpha_t, sigma_t, _, _ = self.transport.interpolant(t)
+ x_t = alpha_t * x + sigma_t * z
+ return self.model_forward(model, x_t, t, r, model_kwargs, rng_state)
+
+ epsilon = self.differential_epsilon
+ fc1_dt = 1 / (2 * epsilon)
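+        # central finite difference in t: dF/dt ≈ (F(t + eps) - F(t - eps)) / (2 * eps)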
+ dF_dv_dt = xfunc(t + epsilon) * fc1_dt - xfunc(t - epsilon) * fc1_dt
+ _dF_dv_dt[n_diffusion:] = dF_dv_dt
+ return _dF_dv_dt
+
+ def get_enhanced_target(self, model, x_t, t, model_kwargs, null_kwargs, rng_state):
+ with torch.no_grad():
+            # model_forward applies c_noise internally, so pass the raw t here (otherwise c_noise would be applied twice)
+            if self.transport.w_cond > 0:
+                F_t_cond = self.model_forward(
+                    model, x_t, t, t, model_kwargs, rng_state
+                )
+            else:
+                F_t_cond = 0
+            F_t_uncond = self.model_forward(
+                model, x_t, t, t, null_kwargs, rng_state
+            )
+ return F_t_cond, F_t_uncond
+
+ def time_weighting(self, t, r, n_diffusion):
+ if self.weight_time_tangent:
+ t, r = torch.tan(t), torch.tan(r)
+ elif self.weight_time_sigmoid:
+ t, r = t / (1 - t), r / (1 - r)
+ if self.weight_t_and_r:
+ delta_t = (t - r).flatten()
+ else:
+ delta_t = t.flatten()
+ if self.weight_time_type == "constant":
+ weight = torch.ones_like(delta_t)
+ elif self.weight_time_type == "reciprocal":
+ weight = 1 / (delta_t + self.transport.sigma_d)
+ elif self.weight_time_type == "sqrt":
+ weight = 1 / (delta_t + self.transport.sigma_d).sqrt()
+ elif self.weight_time_type == "square":
+ weight = 1 / (delta_t + self.transport.sigma_d) ** 2
+ elif self.weight_time_type == "Soft-Min-SNR":
+ weight = 1 / (delta_t**2 + self.transport.sigma_d**2)
+ else:
+ raise NotImplementedError
+ weight[:n_diffusion] = 1.0
+ return weight
+
+ def adaptive_weighting(self, loss, eps=10e-6):
+ weight = 1 / (loss.detach() + eps)
+ return weight
+
+ def __call__(
+ self,
+ model,
+ ema_model,
+ unwrapped_model,
+ batch_size,
+ x,
+ z,
+ model_kwargs,
+ use_dir_loss=False,
+ h_target=None,
+ ema_kwargs={},
+ null_kwargs={},
+ ):
+ # prepare model input
+ x_t, v_t, t, r, n_diffusion = self.prepare_input(batch_size, x, z)
+
+ rng_state = torch.cuda.get_rng_state()
+ # get prediction
+ F_pred, h_proj = self.model_forward(model, x_t, t, r, model_kwargs, rng_state)
+ # get target
+ if self.derivative_type == "jvp":
+ dF_dv_dt = self.jvp_derivative(
+ unwrapped_model, x_t, v_t, t, r, model_kwargs, rng_state, n_diffusion
+ )
+ else:
+ dF_dv_dt = self.dde_derivative(
+ unwrapped_model, x, z, t, r, model_kwargs, rng_state, n_diffusion
+ )
+
+ if self.transport.enhance_target:
+ F_t_cond, F_t_uncond = self.get_enhanced_target(
+ ema_model, x_t, t, ema_kwargs, null_kwargs, rng_state
+ )
+ enhance_target = True
+ else:
+ F_t_cond, F_t_uncond, enhance_target = 0, 0, False
+ F_target = self.transport.target(
+ x_t, v_t, x, z, t, r, dF_dv_dt, F_t_cond, F_t_uncond, enhance_target
+ )
+ denoising_loss = mean_flat((F_pred - F_target) ** 2)
+ denoising_loss = torch.nan_to_num(
+ denoising_loss, nan=0, posinf=1e5, neginf=-1e5
+ )
+
+ if use_dir_loss:
+ directional_loss = mean_flat(
+ 1 - F.cosine_similarity(F_pred, F_target, dim=1)
+ )
+ directional_loss = torch.nan_to_num(
+ directional_loss, nan=0, posinf=1e5, neginf=-1e5
+ )
+ denoising_loss += directional_loss
+
+ weight = self.time_weighting(t, r, n_diffusion) * self.adaptive_weighting(
+ denoising_loss
+ )
+ weighted_loss = weight * denoising_loss
+ weighted_loss = weighted_loss.mean()
+
+ proj_loss = mean_flat(1 - torch.cosine_similarity(h_proj, h_target, dim=-1))
+ proj_loss = torch.nan_to_num(proj_loss, nan=0, posinf=1e5, neginf=-1e5)
+ proj_loss = proj_loss.mean()
+
+ loss_dict = dict(
+ weighted_loss=weighted_loss.detach().item(),
+ denoising_loss=denoising_loss.mean().detach().item(),
+ proj_loss=proj_loss.detach().item(),
+ )
+
+ return weighted_loss, proj_loss, loss_dict
+
+ def forward_with_cfg(
+ self, model, x_t, t, r, y, y_null, cfg_scale, cfg_low, cfg_high
+ ):
+ apply_cfg = cfg_scale > 1.0 and t > cfg_low and t < cfg_high
+ if apply_cfg:
+ x_cur = torch.cat([x_t] * 2, dim=0)
+ y_cur = torch.cat([y, y_null], dim=0)
+ else:
+ x_cur = x_t
+ y_cur = y
+ t_cur = torch.ones(x_cur.size(0)).to(x_cur) * self.transport.c_noise(t)
+ r_cur = torch.ones(x_cur.size(0)).to(x_cur) * self.transport.c_noise(r)
+ F_pred = model(x_cur, t_cur, r_cur, y_cur)
+ if apply_cfg:
+ F_cond, F_uncond = F_pred.chunk(2)
+ F_pred = F_uncond + cfg_scale * (F_cond - F_uncond)
+ return F_pred
+
+ @torch.no_grad()
+ def sample(
+ self,
+ model,
+ y,
+ y_null,
+ z,
+ T_max,
+ T_min=0.0,
+ num_steps=4,
+ cfg_scale=1.0,
+ cfg_low=0.0,
+ cfg_high=1.0,
+ stochasticity_ratio=0.0,
+        sample_type: str = "transition",  # 'transition' or 'diffusion'
+ step_callback: Callable[[int], None] | None = None,
+ ):
+ _dtype = z.dtype
+ t_steps = torch.linspace(T_max, T_min, num_steps + 1, dtype=torch.float64).to(z)
+ cfg_low = cfg_low * T_max
+ cfg_high = cfg_high * T_max
+
+ x_cur = deepcopy(z).to(torch.float64)
+ samples = [z]
+ for i, (t_cur, t_next) in enumerate(zip(t_steps[:-1], t_steps[1:])):
+ # x_{N} -> x_{N-1} -> ... -> x_{n+1} -> x_{n} -> x_{n-1} -> ... -> x_{1} -> x_{0}
+ if sample_type == "transition":
+ _t_next = t_next
+            elif sample_type == "diffusion":
+ _t_next = t_cur
+ else:
+                raise NotImplementedError(f"Unknown sample_type: {sample_type}")
+ F_pred = self.forward_with_cfg(
+ model,
+ x_cur.to(_dtype),
+ t_cur,
+ _t_next,
+ y,
+ y_null,
+ cfg_scale,
+ cfg_low,
+ cfg_high,
+ ).to(torch.float64)
+ if stochasticity_ratio > 0.0 and t_cur < T_max and _t_next > T_min:
+ s_ratio = stochasticity_ratio
+ else:
+ s_ratio = 0.0
+ x_next = self.transport.from_x_t_to_x_r(
+ x_cur, t_cur, t_next, F_pred, s_ratio
+ )
+ samples.append(x_next)
+ x_cur = x_next
+ if step_callback is not None:
+ step_callback(i)
+
+ return torch.stack(samples, dim=0).to(torch.float32)
diff --git a/tim/schedulers/transports.py b/tim/schedulers/transports.py
new file mode 100644
index 0000000000000000000000000000000000000000..789f17393b62d8ea1d237ff73319a10d0994bd91
--- /dev/null
+++ b/tim/schedulers/transports.py
@@ -0,0 +1,342 @@
+import torch
+from typing import Optional
+
+class Transport:
+ def __init__(self, sigma_d, T_max, T_min, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ self.sigma_d = sigma_d
+ self.T_max = T_max
+ self.T_min = T_min
+ self.enhance_target = enhance_target
+ self.w_gt = w_gt
+ self.w_cond = w_cond
+ self.w_start = w_start
+ self.w_end = w_end
+
+ def sample_t(self, batch_size, dtype, device):
+ pass
+ def c_noise(self, t: torch.Tensor):
+ pass
+ def interpolant(self, t: torch.Tensor):
+ pass
+ def target(self, x_t: torch.Tensor, v_t: torch.Tensor, x: torch.Tensor, z: torch.Tensor, t: torch.Tensor, r: torch.Tensor, dF_dv_dt: torch.Tensor, F_t_cond: torch.Tensor, F_t_uncond: torch.Tensor):
+ pass
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor):
+ pass
+
+class OT_FM(Transport):
+ def __init__(self, P_mean=0.0, P_std=1.0, sigma_d=1.0, T_max=1.0, T_min=0.0, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ '''
+ Flow-matching with linear path formulation from the paper:
+ "SiT: Exploring Flow and Diffusion-based Generative Models with Scalable Interpolant Transformers"
+ '''
+ self.P_mean = P_mean
+ self.P_std = P_std
+ super().__init__(sigma_d, T_max, T_min, enhance_target, w_gt, w_cond, w_start, w_end)
+
+ def interpolant(self, t: torch.Tensor):
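+        # linear interpolation path: x_t = (1 - t) * x + t * z, with velocity v_t = z - x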
+ alpha_t = 1 - t
+ sigma_t = t
+ d_alpha_t = -1
+ d_sigma_t = 1
+ return alpha_t, sigma_t, d_alpha_t, d_sigma_t
+
+ def sample_t(self, batch_size, dtype, device):
+ rnd_normal = torch.randn((batch_size, ), dtype=dtype, device=device)
+ sigma = (rnd_normal * self.P_std + self.P_mean).exp()
+ t = sigma / (1 + sigma) # [0, 1]
+ return t
+
+ def c_noise(self, t: torch.Tensor):
+ return t
+
+ def target(
+ self,
+ x_t: torch.Tensor,
+ v_t: torch.Tensor,
+ x: torch.Tensor,
+ z: torch.Tensor,
+ t: torch.Tensor,
+ r: torch.Tensor,
+ dF_dv_dt: torch.Tensor,
+ F_t_cond: Optional[torch.Tensor] = 0.0,
+ F_t_uncond: Optional[torch.Tensor] = 0.0,
+ enhance_target = False,
+ ):
+ if enhance_target:
+ w_gt = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_gt, 1.0)
+ w_cond = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_cond, 0.0)
+ v_t = w_gt * v_t + w_cond * F_t_cond + (1-w_gt-w_cond) * F_t_uncond
+ F_target = v_t - (t - r) * dF_dv_dt
+ return F_target
+
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor, s_ratio=0.0):
+ x_r = x_t - (t - r) * F
+ if s_ratio > 0.0:
+ z = x_t + (1-t) * F
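+            # recover the noise estimate: with x_t = (1 - t) * x + t * z and F ≈ z - x, x_t + (1 - t) * F = z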
+ epsilon = torch.randn_like(z)
+ dt = t-r
+ x_r = x_r - s_ratio * z * dt + torch.sqrt(s_ratio*2*t*dt) * epsilon
+ return x_r
+
+
+
+
+class TrigFlow(Transport):
+ def __init__(self, P_mean=-1.0, P_std=1.6, sigma_d=0.5, T_max=1.57, T_min=0.0, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ '''
+ TrigFlow formulation from the paper:
+ "SIMPLIFYING, STABILIZING & SCALING CONTINUOUS-TIME CONSISTENCY MODELS"
+ '''
+ self.P_mean = P_mean
+ self.P_std = P_std
+ super().__init__(sigma_d, T_max, T_min, enhance_target, w_gt, w_cond, w_start, w_end)
+
+ def interpolant(self, t: torch.Tensor):
+ alpha_t = torch.cos(t)
+ sigma_t = torch.sin(t)
+ d_alpha_t = -torch.sin(t)
+ d_sigma_t = torch.cos(t)
+ return alpha_t, sigma_t, d_alpha_t, d_sigma_t
+
+ def sample_t(self, batch_size, dtype, device):
+ rnd_normal = torch.randn((batch_size, ), dtype=dtype, device=device)
+ sigma = (rnd_normal * self.P_std + self.P_mean).exp()
+ t = torch.atan(sigma) # [0, pi/2]
+ return t
+
+ def c_noise(self, t: torch.Tensor):
+ return t
+
+ def target(
+ self,
+ x_t: torch.Tensor,
+ v_t: torch.Tensor,
+ x: torch.Tensor,
+ z: torch.Tensor,
+ t: torch.Tensor,
+ r: torch.Tensor,
+ dF_dv_dt: torch.Tensor,
+ F_t_cond: Optional[torch.Tensor] = 0.0,
+ F_t_uncond: Optional[torch.Tensor] = 0.0,
+ enhance_target = False,
+ ):
+ if enhance_target:
+ w_gt = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_gt, 1.0)
+ w_cond = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_cond, 0.0)
+ v_t = w_gt * v_t + w_cond * F_t_cond + (1-w_gt-w_cond) * F_t_uncond
+ F_target = v_t - torch.tan(t - r) * (x_t + dF_dv_dt)
+ return F_target
+
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor, s_ratio=0.0):
+ x_r = torch.cos(t - r) * x_t - torch.sin(t - r) * F
+ return x_r
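+
+    # Worked check (a sketch, assuming x_t = cos(t) * x + sin(t) * z as in the interpolant above):
+    # the true velocity is v_t = -sin(t) * x + cos(t) * z, and with F = v_t and r = 0,
+    #   x_0 = cos(t) * x_t - sin(t) * v_t
+    #       = cos(t) * (cos(t) * x + sin(t) * z) - sin(t) * (-sin(t) * x + cos(t) * z) = x,
+    # so the cos/sin jump acts as a rotation back towards the data sample.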
+
+
+class EDM(Transport):
+ def __init__(self, P_mean=-1.2, P_std=1.2, sigma_d=0.5, T_max=80.0, T_min=0.01, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ '''
+ EDM formulation from the paper:
+ "Elucidating the Design Space of Diffusion-Based Generative Models"
+ '''
+ self.P_mean = P_mean
+ self.P_std = P_std
+ super().__init__(sigma_d, T_max, T_min, enhance_target, w_gt, w_cond, w_start, w_end)
+
+ def interpolant(self, t: torch.Tensor):
+ '''
+ The d_alpha_t and d_sigma_t are easy to obtain:
+ # from sympy import *
+ # from scipy.stats import *
+ # t, sigma_d = symbols('t sigma_d')
+ # alpha_t = sigma_d * ((t**2 + sigma_d**2) ** (-0.5))
+ # sigma_t = t * ((t**2 + sigma_d**2) ** (-0.5))
+ # d_alpha_t = diff(alpha_t, t)
+ # d_sigma_t = diff(sigma_t, t)
+ # print(d_alpha_t)
+ # print(d_sigma_t)
+ '''
+ sigma_d = self.sigma_d
+ alpha_t = 1 / (t**2 + sigma_d**2).sqrt()
+ sigma_t = t / (t**2 + sigma_d**2).sqrt()
+ d_alpha_t = -t / ((sigma_d ** 2 + t ** 2) ** 1.5)
+ d_sigma_t = (sigma_d ** 2) / ((sigma_d ** 2 + t ** 2) ** 1.5)
+ return alpha_t, sigma_t, d_alpha_t, d_sigma_t
+
+ def sample_t(self, batch_size, dtype, device):
+ rnd_normal = torch.randn((batch_size, ), dtype=dtype, device=device)
+ sigma = (rnd_normal * self.P_std + self.P_mean).exp()
+ t = sigma # t > 0
+ return t
+
+ def c_noise(self, t: torch.Tensor):
+ return torch.log(t) / 4
+
+ def target(
+ self,
+ x_t: torch.Tensor,
+ v_t: torch.Tensor,
+ x: torch.Tensor,
+ z: torch.Tensor,
+ t: torch.Tensor,
+ r: torch.Tensor,
+ dF_dv_dt: torch.Tensor,
+ F_t_cond: Optional[torch.Tensor] = 0.0,
+ F_t_uncond: Optional[torch.Tensor] = 0.0,
+ enhance_target = False,
+ ):
+ sigma_d = self.sigma_d
+ alpha_hat_t = t / (sigma_d * (t**2 + sigma_d**2).sqrt())
+ sigma_hat_t = - sigma_d / (t**2 + sigma_d**2).sqrt()
+ d_alpha_hat_t = -t**2/(sigma_d*(sigma_d**2 + t**2)**(3/2)) + 1/(sigma_d*(sigma_d**2 + t**2).sqrt())
+ d_sigma_hat_t = sigma_d * t / ((sigma_d**2 + t**2)**(3/2))
+ diffusion_target = alpha_hat_t * x + sigma_hat_t * z
+ Bt_dv_dBt = (t - r) * (sigma_d**2 + t**2) * (sigma_d**3 + t**2) / (
+ 2*t*(r - t)*(sigma_d**2 + t**2) - t*(r - t)*(sigma_d**3 + t**2) + (sigma_d**2 + t**2)*(sigma_d**3 + t**2)
+ )
+ if enhance_target:
+ w_gt = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_gt, 1.0)
+ w_cond = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_cond, 0.0)
+ diffusion_target = w_gt * diffusion_target + w_cond * F_t_cond + (1-w_gt-w_cond) * F_t_uncond
+ F_target = diffusion_target + Bt_dv_dBt * (d_alpha_hat_t*x + d_sigma_hat_t*z -dF_dv_dt)
+ return F_target
+
+
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor, s_ratio=0.0):
+ sigma_d = self.sigma_d
+ ratio = (t**2 + sigma_d**2).sqrt() / (r**2 + sigma_d**2).sqrt() / (sigma_d**3 + t**2)
+ A_t = (sigma_d**3 + t*r) * ratio
+ B_t = (sigma_d**2) * (t-r) * ratio
+ x_r = A_t * x_t + B_t * F
+ return x_r
+
+
+class VP_SDE(Transport):
+ def __init__(self, beta_min=0.1, beta_d=19.9, epsilon_t=1e-5, T=1000, sigma_d=1.0, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ '''
+ Variance preserving (VP) formulation from the paper:
+ "Score-Based Generative Modeling through Stochastic Differential Equations".
+ '''
+ self.beta_min = beta_min
+ self.beta_d = beta_d
+ self.epsilon_t = epsilon_t
+ self.T = T
+ super().__init__(sigma_d, 1.0, epsilon_t, enhance_target, w_gt, w_cond, w_start, w_end)
+
+ def interpolant(self, t: torch.Tensor):
+ '''
+ The d_alpha_t and d_sigma_t are easy to obtain:
+ # from sympy import *
+ # from scipy.stats import *
+ # t, beta_d, beta_min = symbols('t beta_d beta_min')
+ # sigma = sqrt(exp(0.5 * beta_d * (t ** 2) + beta_min * t) - 1)
+ # d_sigma_d_t = diff(sigma, t)
+ # print(d_sigma_d_t)
+ # sigma = symbols('sigma')
+ # alpha_t = (sigma**2 + 1) ** (-0.5)
+ # sigma_t = sigma * (sigma**2 + 1) ** (-0.5)
+ # d_alpha_d_sigma = diff(alpha_t, sigma)
+ # print(d_alpha_d_sigma)
+ # d_sigma_d_sigma = diff(sigma_t, sigma)
+ # print(d_sigma_d_sigma)
+ '''
+ beta_t = self.beta(t)
+ alpha_t = 1 / torch.sqrt(beta_t**2 + 1)
+ sigma_t = beta_t / torch.sqrt(beta_t**2 + 1)
+ d_alpha_t = -0.5 * (self.beta_d * t + self.beta_min) / (beta_t**2 + 1).sqrt()
+ d_sigma_t = 0.5 * (self.beta_d * t + self.beta_min) / (beta_t * (beta_t**2 + 1).sqrt())
+ return alpha_t, sigma_t, d_alpha_t, d_sigma_t
+
+ def beta(self, t: torch.Tensor):
+ return torch.sqrt((0.5 * self.beta_d * (t ** 2) + self. beta_min * t).exp() - 1)
+
+ def sample_t(self, batch_size, dtype, device):
+ rnd_uniform = torch.rand((batch_size, ), dtype=dtype, device=device)
+ t = 1 + rnd_uniform * (self.epsilon_t - 1) # [epsilon_t, 1]
+ return t
+
+ def c_noise(self, t: torch.Tensor):
+ return (self.T - 1) * t
+
+ def target(
+ self,
+ x_t: torch.Tensor,
+ v_t: torch.Tensor,
+ x: torch.Tensor,
+ z: torch.Tensor,
+ t: torch.Tensor,
+ r: torch.Tensor,
+ dF_dv_dt: torch.Tensor,
+ F_t_cond: Optional[torch.Tensor] = 0.0,
+ F_t_uncond: Optional[torch.Tensor] = 0.0,
+ enhance_target = False,
+ ):
+ if enhance_target:
+ w_gt = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_gt, 1.0)
+ w_cond = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_cond, 0.0)
+ z = w_gt * z + w_cond * F_t_cond + (1-w_gt-w_cond) * F_t_uncond
+ beta_t = self.beta(t)
+ beta_r = self.beta(r)
+ d_beta_t = (self.beta_d * t + self.beta_min) * (beta_t ** 2 + 1) / (2 * beta_t)
+ F_target = z - dF_dv_dt * (beta_t - beta_r) / d_beta_t
+ return F_target
+
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor, s_ratio=0.0):
+ beta_t = self.beta(t)
+ beta_r = self.beta(r)
+ A_t = (beta_t ** 2 + 1).sqrt() / (beta_r ** 2 + 1).sqrt()
+ B_t = (beta_r - beta_t) / (beta_r ** 2 + 1).sqrt()
+ x_r = A_t * x_t + B_t * F
+ return x_r
+
+
+
+
+class VE_SDE(Transport):
+ def __init__(self, sigma_min=0.02, sigma_max=100, sigma_d=1.0, enhance_target=False, w_gt=1.0, w_cond=0.0, w_start=0.0, w_end=1.0):
+ '''
+ Variance exploding (VE) formulation from the paper:
+ "Score-Based Generative Modeling through Stochastic Differential Equations".
+ '''
+ self.sigma_min = sigma_min
+ self.sigma_max = sigma_max
+ super().__init__(sigma_d, sigma_max, sigma_min, enhance_target, w_gt, w_cond, w_start, w_end)
+
+ def interpolant(self, t: torch.Tensor):
+ alpha_t = 1
+ sigma_t = t
+ d_alpha_t = 0
+ d_sigma_t = 1
+ return alpha_t, sigma_t, d_alpha_t, d_sigma_t
+
+ def sample_t(self, batch_size, dtype, device):
+ rnd_uniform = torch.rand((batch_size, ), dtype=dtype, device=device)
+ t = self.sigma_min * ((self.sigma_max / self.sigma_min) ** rnd_uniform) # [sigma_min, sigma_max]
+ return t
+
+ def c_noise(self, t: torch.Tensor):
+ return torch.log(0.5 * t)
+
+ def target(
+ self,
+ x_t: torch.Tensor,
+ v_t: torch.Tensor,
+ x: torch.Tensor,
+ z: torch.Tensor,
+ t: torch.Tensor,
+ r: torch.Tensor,
+ dF_dv_dt: torch.Tensor,
+ F_t_cond: Optional[torch.Tensor] = 0.0,
+ F_t_uncond: Optional[torch.Tensor] = 0.0,
+ enhance_target = False,
+ ):
+ if enhance_target:
+ w_gt = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_gt, 1.0)
+ w_cond = torch.where((t>=self.w_start) & (t<=self.w_end), self.w_cond, 0.0)
+ z = w_gt * z + w_cond * (-F_t_cond) + (1-w_gt-w_cond) * (-F_t_uncond)
+ F_target = (r - t) * dF_dv_dt - z
+ return F_target
+
+
+ def from_x_t_to_x_r(self, x_t: torch.Tensor, t: torch.Tensor, r: torch.Tensor, F: torch.Tensor, s_ratio=0.0):
+ x_r = x_t + (t - r) * F
+ return x_r
diff --git a/tim/utils/__init__.py b/tim/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..56ed35d16593187b1a98b7f8f86146f8f2f481c2
--- /dev/null
+++ b/tim/utils/__init__.py
@@ -0,0 +1,3 @@
+from .misc_utils import *
+from .train_utils import *
+from .gpu_memory_monitor import *
\ No newline at end of file
diff --git a/tim/utils/deepspeed_zero_to_fp32.py b/tim/utils/deepspeed_zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..b095589081755d2e9d9290443e7c6faa3d13b282
--- /dev/null
+++ b/tim/utils/deepspeed_zero_to_fp32.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
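+
+# Example (illustrative): sorted(["step10", "step2"], key=natural_keys) returns ["step2", "step10"],
+# whereas plain lexicographic sorting would return ["step10", "step2"].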
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
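+
+# Worked example (illustrative): a parameter with 10 elements sharded across world_size=4 ranks gives
+# remainder=2, so padding_numel=2 and partitioned_numel=ceil(10/4)=3; 4 ranks * 3 elements = 12 = 10 + 2 padding.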
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+    - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_file,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/tim/utils/gpu_memory_monitor.py b/tim/utils/gpu_memory_monitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4c4306a224e09517cd2c002dcedcf71b123ac5a
--- /dev/null
+++ b/tim/utils/gpu_memory_monitor.py
@@ -0,0 +1,91 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+from collections import namedtuple
+from datetime import datetime
+from typing import Any, Dict, Optional
+
+import torch
+
+# named tuple for passing GPU memory stats for logging
+GPUMemStats = namedtuple(
+ "GPUMemStats",
+ [
+ "max_active_gib",
+ "max_active_pct",
+ "max_reserved_gib",
+ "max_reserved_pct",
+ "num_alloc_retries",
+ "num_ooms",
+ ],
+)
+
+
+class GPUMemoryMonitor:
+ def __init__(self, logger, device: str = "cuda:0"):
+ self.device = torch.device(device) # device object
+ self.device_name = torch.cuda.get_device_name(self.device)
+ self.device_index = torch.cuda.current_device()
+ self.device_capacity = torch.cuda.get_device_properties(
+ self.device
+ ).total_memory
+ self.device_capacity_gib = self._to_gib(self.device_capacity)
+
+ self.logger = logger
+
+ torch.cuda.reset_peak_memory_stats()
+ torch.cuda.empty_cache()
+
+ def _to_gib(self, memory_in_bytes):
+        # NOTE: GiB (gibibyte) = 1024**3 bytes, whereas GB (gigabyte) = 1000**3 bytes
+ _gib_in_bytes = 1024 * 1024 * 1024
+ memory_in_gib = memory_in_bytes / _gib_in_bytes
+ return memory_in_gib
+
+ def _to_pct(self, memory):
+ return 100 * memory / self.device_capacity
+
+ def get_peak_stats(self):
+ cuda_info = torch.cuda.memory_stats(self.device)
+
+ max_active = cuda_info["active_bytes.all.peak"]
+ max_active_gib = self._to_gib(max_active)
+ max_active_pct = self._to_pct(max_active)
+
+ max_reserved = cuda_info["reserved_bytes.all.peak"]
+ max_reserved_gib = self._to_gib(max_reserved)
+ max_reserved_pct = self._to_pct(max_reserved)
+
+ num_retries = cuda_info["num_alloc_retries"]
+ num_ooms = cuda_info["num_ooms"]
+
+ if num_retries > 0:
+ self.logger.warning(f"{num_retries} CUDA memory allocation retries.")
+ if num_ooms > 0:
+ self.logger.warning(f"{num_ooms} CUDA OOM errors thrown.")
+
+ return GPUMemStats(
+ max_active_gib,
+ max_active_pct,
+ max_reserved_gib,
+ max_reserved_pct,
+ num_retries,
+ num_ooms,
+ )
+
+ def reset_peak_stats(self):
+ torch.cuda.reset_peak_memory_stats()
+
+
+def build_gpu_memory_monitor(logger):
+ gpu_memory_monitor = GPUMemoryMonitor(logger, "cuda")
+ logger.info(
+ f"GPU capacity: {gpu_memory_monitor.device_name} ({gpu_memory_monitor.device_index}) "
+ f"with {gpu_memory_monitor.device_capacity_gib:.2f}GiB memory"
+ )
+
+ return gpu_memory_monitor
\ No newline at end of file
diff --git a/tim/utils/lr_scheduler.py b/tim/utils/lr_scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b34aea9be6328d88d2eff4c63e1f18105f347ed
--- /dev/null
+++ b/tim/utils/lr_scheduler.py
@@ -0,0 +1,370 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch optimization for diffusion models."""
+
+import math
+from enum import Enum
+from typing import Optional, Union
+
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LambdaLR
+
+
+class SchedulerType(Enum):
+ LINEAR = "linear"
+ COSINE = "cosine"
+ COSINE_WITH_RESTARTS = "cosine_with_restarts"
+ POLYNOMIAL = "polynomial"
+ CONSTANT = "constant"
+ CONSTANT_WITH_WARMUP = "constant_with_warmup"
+ PIECEWISE_CONSTANT = "piecewise_constant"
+ WARMDUP_STABLE_DECAY = "warmup_stable_decay"
+
+
+def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
+ """
+ Create a schedule with a constant learning rate, using the learning rate set in optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
+
+def get_constant_schedule_with_warmup(
+    optimizer: Optimizer, num_warmup_steps: int, div_factor: float = 1e-4, last_epoch: int = -1
+):
+ def lr_lambda(current_step):
+        # Linear warmup of the LR multiplier from div_factor up to 1.0 over num_warmup_steps:
+        # lr(step) = base_lr * ((1 - div_factor) * step / num_warmup_steps + div_factor), then constant at base_lr.
+ if current_step < num_warmup_steps:
+ return (1 - div_factor) * float(current_step) / float(max(1, num_warmup_steps)) + div_factor
+ return 1.0
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
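+
+# Example (illustrative): with num_warmup_steps=100 and div_factor=1e-4, the LR multiplier is 1e-4 at step 0,
+# ~0.5 at step 50, and 1.0 from step 100 onwards (a linear warmup from div_factor to 1, then constant).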
+
+def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
+ """
+    Create a schedule with a piecewise constant learning rate, scaling the learning rate set in the optimizer by step-dependent multipliers.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ step_rules (`string`):
+            The rules for the learning rate multipliers. For example, step_rules="1:10,0.1:20,0.01:30,0.005" means
+            the learning rate is multiplied by 1 until step 10, by 0.1 until step 20, by 0.01 until step 30, and
+            by 0.005 for all remaining steps.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+
+ rules_dict = {}
+ rule_list = step_rules.split(",")
+ for rule_str in rule_list[:-1]:
+ value_str, steps_str = rule_str.split(":")
+ steps = int(steps_str)
+ value = float(value_str)
+ rules_dict[steps] = value
+ last_lr_multiple = float(rule_list[-1])
+
+ def create_rules_function(rules_dict, last_lr_multiple):
+ def rule_func(steps: int) -> float:
+ sorted_steps = sorted(rules_dict.keys())
+ for i, sorted_step in enumerate(sorted_steps):
+ if steps < sorted_step:
+ return rules_dict[sorted_steps[i]]
+ return last_lr_multiple
+
+ return rule_func
+
+ rules_func = create_rules_function(rules_dict, last_lr_multiple)
+
+ return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
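+
+# Example (illustrative): with step_rules="1:10,0.1:20,0.005", the returned LambdaLR multiplies the base LR
+# by 1.0 for steps 0-9, by 0.1 for steps 10-19, and by 0.005 from step 20 onwards.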
+
+
+def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
+ """
+ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
+ a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ num_warmup_steps (`int`):
+ The number of steps for the warmup phase.
+ num_training_steps (`int`):
+ The total number of training steps.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+
+ def lr_lambda(current_step: int):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ return max(
+ 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
+ )
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+def get_cosine_schedule_with_warmup(
+ optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
+):
+ """
+ Create a schedule with a learning rate that decreases following the values of the cosine function between the
+ initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+ initial lr set in the optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ num_warmup_steps (`int`):
+ The number of steps for the warmup phase.
+ num_training_steps (`int`):
+ The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+ The number of periods of the cosine function in a schedule (the default is to just decrease from the max
+ value to 0 following a half-cosine).
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+
+ def lr_lambda(current_step):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
+ return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
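+
+# Example (illustrative): with num_warmup_steps=100, num_training_steps=1100 and the default num_cycles=0.5,
+# the multiplier is 0.5 at step 50 (warmup), 1.0 at step 100, 0.5 at step 600 (cosine midpoint) and 0.0 at step 1100.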
+
+
+def get_cosine_with_hard_restarts_schedule_with_warmup(
+ optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
+):
+ """
+ Create a schedule with a learning rate that decreases following the values of the cosine function between the
+ initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
+ linearly between 0 and the initial lr set in the optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ num_warmup_steps (`int`):
+ The number of steps for the warmup phase.
+ num_training_steps (`int`):
+ The total number of training steps.
+ num_cycles (`int`, *optional*, defaults to 1):
+ The number of hard restarts to use.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+ """
+
+ def lr_lambda(current_step):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
+ if progress >= 1.0:
+ return 0.0
+ return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+def get_polynomial_decay_schedule_with_warmup(
+ optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
+):
+ """
+ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
+ optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
+ initial lr set in the optimizer.
+
+ Args:
+ optimizer ([`~torch.optim.Optimizer`]):
+ The optimizer for which to schedule the learning rate.
+ num_warmup_steps (`int`):
+ The number of steps for the warmup phase.
+ num_training_steps (`int`):
+ The total number of training steps.
+ lr_end (`float`, *optional*, defaults to 1e-7):
+ The end LR.
+ power (`float`, *optional*, defaults to 1.0):
+ Power factor.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+
+ Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
+ implementation at
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37
+
+ Return:
+ `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+
+ """
+
+ lr_init = optimizer.defaults["lr"]
+ if not (lr_init > lr_end):
+ raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")
+
+ def lr_lambda(current_step: int):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ elif current_step > num_training_steps:
+ return lr_end / lr_init # as LambdaLR multiplies by lr_init
+ else:
+ lr_range = lr_init - lr_end
+ decay_steps = num_training_steps - num_warmup_steps
+ pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
+ decay = lr_range * pct_remaining**power + lr_end
+ return decay / lr_init # as LambdaLR multiplies by lr_init
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+def get_constant_schedule_with_warmup_and_decay(
+    optimizer: Optimizer, num_warmup_steps: int, num_decay_steps: int, decay_T: int = 50000, div_factor: float = 1e-4, last_epoch: int = -1
+):
+ def lr_lambda(current_step):
+        # Linear warmup of the LR multiplier from div_factor up to 1.0 over num_warmup_steps,
+        # then constant at 1.0 until num_decay_steps, then exponential decay with half-life decay_T.
+ if current_step < num_warmup_steps:
+ return (1 - div_factor) * float(current_step) / float(max(1, num_warmup_steps)) + div_factor
+ if current_step > num_decay_steps:
+ return 0.5 ** ((current_step - num_decay_steps) / decay_T)
+ return 1.0
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
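+
+# Example (illustrative): after the warmup ramp, the multiplier stays at 1.0 until num_decay_steps and then
+# halves every decay_T steps, e.g. 0.5 at num_decay_steps + decay_T and 0.25 at num_decay_steps + 2 * decay_T.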
+
+TYPE_TO_SCHEDULER_FUNCTION = {
+ SchedulerType.LINEAR: get_linear_schedule_with_warmup,
+ SchedulerType.COSINE: get_cosine_schedule_with_warmup,
+ SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
+ SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
+ SchedulerType.CONSTANT: get_constant_schedule,
+ SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
+ SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
+ SchedulerType.WARMDUP_STABLE_DECAY: get_constant_schedule_with_warmup_and_decay
+}
+
+
+
+
+
+
+
+def get_scheduler(
+ name: Union[str, SchedulerType],
+ optimizer: Optimizer,
+ step_rules: Optional[str] = None,
+ num_warmup_steps: Optional[int] = None,
+ num_decay_steps: Optional[int] = None,
+ num_training_steps: Optional[int] = None,
+ num_cycles: int = 1,
+ decay_T: Optional[int] = 50000,
+ power: float = 1.0,
+ last_epoch: int = -1,
+):
+ """
+ Unified API to get any scheduler from its name.
+
+ Args:
+ name (`str` or `SchedulerType`):
+ The name of the scheduler to use.
+ optimizer (`torch.optim.Optimizer`):
+ The optimizer that will be used during training.
+ step_rules (`str`, *optional*):
+ A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler.
+ num_warmup_steps (`int`, *optional*):
+ The number of warmup steps to do. This is not required by all schedulers (hence the argument being
+ optional), the function will raise an error if it's unset and the scheduler type requires it.
+ num_decay_steps (`int`, *optional*):
+ The number of decay steps to do. This is not required by all schedulers (hence the argument being
+ optional), the function will raise an error if it's unset and the scheduler type requires it.
+        num_training_steps (`int`, *optional*):
+ The number of training steps to do. This is not required by all schedulers (hence the argument being
+ optional), the function will raise an error if it's unset and the scheduler type requires it.
+ num_cycles (`int`, *optional*):
+ The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler.
+ power (`float`, *optional*, defaults to 1.0):
+            Power factor. See `POLYNOMIAL` scheduler.
+        decay_T (`int`, *optional*, defaults to 50000):
+            Half-life, in steps, of the exponential decay applied after `num_decay_steps` by the
+            `WARMDUP_STABLE_DECAY` scheduler.
+ last_epoch (`int`, *optional*, defaults to -1):
+ The index of the last epoch when resuming training.
+ """
+ name = SchedulerType(name)
+ schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
+ if name == SchedulerType.CONSTANT:
+ return schedule_func(optimizer, last_epoch=last_epoch)
+
+ if name == SchedulerType.PIECEWISE_CONSTANT:
+ return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
+
+ # All other schedulers require `num_warmup_steps`
+ if num_warmup_steps is None:
+ raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
+
+ if name == SchedulerType.CONSTANT_WITH_WARMUP:
+ return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
+
+ if name == SchedulerType.WARMDUP_STABLE_DECAY:
+ return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_decay_steps=num_decay_steps, decay_T=decay_T, last_epoch=last_epoch)
+
+ # All other schedulers require `num_training_steps`
+ if num_training_steps is None:
+ raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
+
+ if name == SchedulerType.COSINE_WITH_RESTARTS:
+ return schedule_func(
+ optimizer,
+ num_warmup_steps=num_warmup_steps,
+ num_training_steps=num_training_steps,
+ num_cycles=num_cycles,
+ last_epoch=last_epoch,
+ )
+
+ if name == SchedulerType.POLYNOMIAL:
+ return schedule_func(
+ optimizer,
+ num_warmup_steps=num_warmup_steps,
+ num_training_steps=num_training_steps,
+ power=power,
+ last_epoch=last_epoch,
+ )
+
+ return schedule_func(
+ optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
+ )
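+
+# Minimal usage sketch (illustrative; `optimizer` is any torch.optim.Optimizer built elsewhere):
+#   lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=1_000, num_training_steps=100_000)
+#   # then call lr_scheduler.step() once after each optimizer.step()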
diff --git a/tim/utils/misc_utils.py b/tim/utils/misc_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d7fa51bad48ecd6db4cdce5ce562a67b77dc180
--- /dev/null
+++ b/tim/utils/misc_utils.py
@@ -0,0 +1,377 @@
+import functools
+import importlib
+import os
+import wandb
+import fsspec
+import numpy as np
+import torch
+
+from dataclasses import dataclass
+from functools import partial
+from inspect import isfunction
+from PIL import Image, ImageDraw, ImageFont
+from safetensors.torch import load_file
+from tqdm import tqdm
+
+def create_npz_from_sample_folder(sample_dir, num=50_000):
+ """
+ Builds a single .npz file from a folder of .png samples.
+ """
+ samples = []
+ imgs = sorted(os.listdir(sample_dir), key=lambda x: int(x.split('.')[0]))
+ print(len(imgs))
+ assert len(imgs) >= num
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
+ sample_pil = Image.open(f"{sample_dir}/{imgs[i]}")
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
+ samples.append(sample_np)
+ samples = np.stack(samples)
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
+ npz_path = f"{sample_dir}.npz"
+ np.savez(npz_path, arr_0=samples)
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
+ return npz_path
+
+def init_from_ckpt(
+ model, checkpoint_dir, ignore_keys=None, verbose=False
+) -> None:
+ if checkpoint_dir.endswith(".safetensors"):
+ model_state_dict=load_file(checkpoint_dir, device='cpu')
+ else:
+ model_state_dict=torch.load(checkpoint_dir, map_location="cpu")
+ model_new_ckpt=dict()
+ for i in model_state_dict.keys():
+ model_new_ckpt[i] = model_state_dict[i]
+ keys = list(model_new_ckpt.keys())
+ for k in keys:
+ if ignore_keys:
+ for ik in ignore_keys:
+ if ik in k:
+ print("Deleting key {} from state_dict.".format(k))
+ del model_new_ckpt[k]
+ missing, unexpected = model.load_state_dict(model_new_ckpt, strict=False)
+ if verbose:
+ print(
+ f"Restored with {len(missing)} missing and {len(unexpected)} unexpected keys"
+ )
+ if len(missing) > 0:
+ print(f"Missing Keys: {missing}")
+ if len(unexpected) > 0:
+ print(f"Unexpected Keys: {unexpected}")
+ if verbose:
+ print("")
+
+
+def get_dtype(str_dtype):
+ if str_dtype == 'fp16':
+ return torch.float16
+ elif str_dtype == 'bf16':
+ return torch.bfloat16
+ else:
+ return torch.float32
+
+
+def disabled_train(self, mode=True):
+ """Overwrite model.train with this function to make sure train/eval mode
+ does not change anymore."""
+ return self
+
+
+def get_string_from_tuple(s):
+ try:
+ # Check if the string starts and ends with parentheses
+ if s[0] == "(" and s[-1] == ")":
+ # Convert the string to a tuple
+ t = eval(s)
+ # Check if the type of t is tuple
+ if type(t) == tuple:
+ return t[0]
+ else:
+ pass
+ except:
+ pass
+ return s
+
+
+def is_power_of_two(n):
+ """
+    Return True if n is a power of 2, otherwise return False.
+
+    Non-positive integers are never powers of 2. For n > 0, a power of 2 has exactly one bit set in its binary
+    representation; subtracting 1 flips that bit and sets all lower bits, so n & (n - 1) == 0 exactly when n is
+    a power of 2.
+
+ """
+ if n <= 0:
+ return False
+ return (n & (n - 1)) == 0
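+
+# Examples (illustrative): is_power_of_two(8) -> True since 8 & 7 == 0b1000 & 0b0111 == 0;
+# is_power_of_two(6) -> False since 6 & 5 == 0b100 != 0; is_power_of_two(0) -> False.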
+
+
+def autocast(f, enabled=True):
+ def do_autocast(*args, **kwargs):
+ with torch.cuda.amp.autocast(
+ enabled=enabled,
+ dtype=torch.get_autocast_gpu_dtype(),
+ cache_enabled=torch.is_autocast_cache_enabled(),
+ ):
+ return f(*args, **kwargs)
+
+ return do_autocast
+
+
+def load_partial_from_config(config):
+ return partial(get_obj_from_str(config["target"]), **config.get("params", dict()))
+
+
+def log_txt_as_img(wh, xc, size=10):
+ # wh a tuple of (width, height)
+ # xc a list of captions to plot
+ b = len(xc)
+ txts = list()
+ for bi in range(b):
+ txt = Image.new("RGB", wh, color="white")
+ draw = ImageDraw.Draw(txt)
+ font = ImageFont.truetype("data/DejaVuSans.ttf", size=size)
+ nc = int(40 * (wh[0] / 256))
+ if isinstance(xc[bi], list):
+ text_seq = xc[bi][0]
+ else:
+ text_seq = xc[bi]
+ lines = "\n".join(
+ text_seq[start : start + nc] for start in range(0, len(text_seq), nc)
+ )
+
+ try:
+ draw.text((0, 0), lines, fill="black", font=font)
+ except UnicodeEncodeError:
+ print("Cant encode string for logging. Skipping.")
+
+ txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
+ txts.append(txt)
+ txts = np.stack(txts)
+ txts = torch.tensor(txts)
+ return txts
+
+
+def partialclass(cls, *args, **kwargs):
+ class NewCls(cls):
+ __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)
+
+ return NewCls
+
+
+def make_path_absolute(path):
+ fs, p = fsspec.core.url_to_fs(path)
+ if fs.protocol == "file":
+ return os.path.abspath(p)
+ return path
+
+
+def ismap(x):
+ if not isinstance(x, torch.Tensor):
+ return False
+ return (len(x.shape) == 4) and (x.shape[1] > 3)
+
+
+def isimage(x):
+ if not isinstance(x, torch.Tensor):
+ return False
+ return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
+
+
+def isheatmap(x):
+ if not isinstance(x, torch.Tensor):
+ return False
+
+ return x.ndim == 2
+
+
+def isneighbors(x):
+ if not isinstance(x, torch.Tensor):
+ return False
+ return x.ndim == 5 and (x.shape[2] == 3 or x.shape[2] == 1)
+
+
+def exists(x):
+ return x is not None
+
+
+def expand_dims_like(x, y):
+ while x.dim() != y.dim():
+ x = x.unsqueeze(-1)
+ return x
+
+
+def default(val, d):
+ if exists(val):
+ return val
+ return d() if isfunction(d) else d
+
+
+def mean_flat(tensor):
+ """
+ https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
+ Take the mean over all non-batch dimensions.
+ """
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
+
+
+def count_params(model, verbose=False):
+ total_params = sum(p.numel() for p in model.parameters())
+ if verbose:
+ print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
+ return total_params
+
+
+def instantiate_from_config(config):
+ if not "target" in config:
+ if config == "__is_first_stage__":
+ return None
+ elif config == "__is_unconditional__":
+ return None
+ raise KeyError("Expected key `target` to instantiate.")
+ return get_obj_from_str(config["target"])(**config.get("params", dict()))
+
+
+def get_obj_from_str(string, reload=False, invalidate_cache=True):
+ module, cls = string.rsplit(".", 1)
+ if invalidate_cache:
+ importlib.invalidate_caches()
+ if reload:
+ module_imp = importlib.import_module(module)
+ importlib.reload(module_imp)
+ return getattr(importlib.import_module(module, package=None), cls)
+
+
+def append_zero(x):
+ return torch.cat([x, x.new_zeros([1])])
+
+
+def append_dims(x, target_dims):
+ """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
+ dims_to_append = target_dims - x.ndim
+ if dims_to_append < 0:
+ raise ValueError(
+ f"input has {x.ndim} dims but target_dims is {target_dims}, which is less"
+ )
+ return x[(...,) + (None,) * dims_to_append]
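+
+# Example (illustrative): append_dims(torch.randn(4), 3) has shape (4, 1, 1), which is handy for broadcasting
+# per-sample scalars (e.g. timesteps) against batched feature tensors.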
+
+
+def load_model_from_config(config, ckpt, verbose=True, freeze=True):
+ print(f"Loading model from {ckpt}")
+ if ckpt.endswith("ckpt"):
+ pl_sd = torch.load(ckpt, map_location="cpu")
+ if "global_step" in pl_sd:
+ print(f"Global Step: {pl_sd['global_step']}")
+ sd = pl_sd["state_dict"]
+ elif ckpt.endswith("safetensors"):
+ sd = load_safetensors(ckpt)
+ elif ckpt.endswith("bin"):
+ sd = torch.load(ckpt, map_location="cpu")
+ else:
+ raise NotImplementedError
+
+ model = instantiate_from_config(config.model)
+
+ m, u = model.load_state_dict(sd, strict=False)
+
+ if len(m) > 0 and verbose:
+ print("missing keys:")
+ print(m)
+ if len(u) > 0 and verbose:
+ print("unexpected keys:")
+ print(u)
+
+ if freeze:
+ for param in model.parameters():
+ param.requires_grad = False
+
+ model.eval()
+ return model
+
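+# Usage sketch (assumes an OmegaConf-style config exposing `config.model` with
+# `target`/`params`; the paths below are hypothetical):
+#   config = OmegaConf.load("configs/model.yaml")
+#   model = load_model_from_config(config, "path/to/model.safetensors")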
+
+def format_number(num):
+ num = float(num)
+ num /= 1000.0
+ return '{:.0f}{}'.format(num, 'k')
+
+def get_num_params(model: torch.nn.ModuleList) -> int:
+ num_params = sum(p.numel() for p in model.parameters())
+ return num_params
+
+
+def get_num_flop_per_token(num_params, model_config, seq_len) -> int:
+ l, h, q, t = (
+ model_config.n_layers,
+ model_config.n_heads,
+ model_config.dim // model_config.n_heads,
+ seq_len,
+ )
+ # Reasoning behind the factor of 12 for the self-attention part of the formula:
+ # 1. each self-attention has 2 matmul in the forward and 4 in the backward (6)
+ # 2. the flash attention does 1 more matmul recomputation in the backward
+ # but recomputation should not be counted in calculating MFU (+0)
+ # 3. each matmul performs 1 multiplication and 1 addition (*2)
+ # 4. we follow the convention and do not account for sparsity in causal attention
+ flop_per_token = 6 * num_params + 12 * l * h * q * t
+
+ return flop_per_token
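+
+# Worked example (hypothetical 7B-class config, for illustration only):
+# with n_layers=32, n_heads=32, dim=4096 (so q=128) and seq_len=4096, the
+# attention term is 12 * 32 * 32 * 128 * 4096 ≈ 6.4e9 FLOPs per token on top
+# of the 6 * num_params dense term.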
+
+def get_num_flop_per_sequence_encoder_only(num_params, model_config, seq_len) -> int:
+ l, h, q = (
+ model_config.n_layers,
+ model_config.n_heads,
+ model_config.dim // model_config.n_heads,
+ )
+
+ # 1. each self-attention layer has 2 matmuls in the forward pass and 4 in the backward (6)
+ # 2. each matmul performs 1 multiplication and 1 addition (*2)
+ # 3. bidirectional attention covers all token pairs, so the cost scales with seq_len^2 rather than seq_len
+ flop_per_sequence = 6 * num_params + 12 * l * h * q * seq_len * seq_len
+
+ return flop_per_sequence
+
+
+# hardcoded BF16 type peak flops for NVIDIA A100 and H100 GPU
+def get_peak_flops(device_name: str) -> int:
+ if "A100" in device_name:
+ # data from https://www.nvidia.com/en-us/data-center/a100/
+ return 312e12
+ elif "H100" in device_name:
+ # data from https://www.nvidia.com/en-us/data-center/h100/
+ # NOTE: the datasheet numbers include structured sparsity; the dense peak FLOPS used here are half of those.
+ if "NVL" in device_name:
+ return 1979e12
+ elif "PCIe" in device_name:
+ return 756e12
+ else: # for SXM and other variants
+ return 989e12
+ else: # for other GPU types, assume A100
+ return 312e12
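+
+# MFU sketch (assumed wiring, not part of this module's public API):
+#   flop_per_token = get_num_flop_per_token(get_num_params(model), model_config, seq_len)
+#   achieved_flops = flop_per_token * tokens_per_second_per_gpu
+#   mfu = achieved_flops / get_peak_flops(torch.cuda.get_device_name())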
+
+@dataclass(frozen=True)
+class Color:
+ black = "\033[30m"
+ red = "\033[31m"
+ green = "\033[32m"
+ yellow = "\033[33m"
+ blue = "\033[34m"
+ magenta = "\033[35m"
+ cyan = "\033[36m"
+ white = "\033[37m"
+ reset = "\033[39m"
+
+
+@dataclass(frozen=True)
+class NoColor:
+ black = ""
+ red = ""
+ green = ""
+ yellow = ""
+ blue = ""
+ magenta = ""
+ cyan = ""
+ white = ""
+ reset = ""
\ No newline at end of file
diff --git a/tim/utils/train_utils.py b/tim/utils/train_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eec6e9ab2a615d551212237fe00de32da57e896
--- /dev/null
+++ b/tim/utils/train_utils.py
@@ -0,0 +1,99 @@
+import torch
+from collections import OrderedDict
+from copy import deepcopy
+from diffusers.utils import logging
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def get_fsdp_plugin(fsdp_cfg, mixed_precision):
+ import functools
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
+ BackwardPrefetch, CPUOffload, ShardingStrategy, MixedPrecision,
+ StateDictType, FullStateDictConfig, FullOptimStateDictConfig,
+ )
+ from accelerate.utils import FullyShardedDataParallelPlugin
+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy
+
+ if mixed_precision == "fp16":
+ dtype = torch.float16
+ elif mixed_precision == "bf16":
+ dtype = torch.bfloat16
+ else:
+ dtype = torch.float32
+ fsdp_plugin = FullyShardedDataParallelPlugin(
+ sharding_strategy = {
+ 'FULL_SHARD': ShardingStrategy.FULL_SHARD,
+ 'SHARD_GRAD_OP': ShardingStrategy.SHARD_GRAD_OP,
+ 'NO_SHARD': ShardingStrategy.NO_SHARD,
+ 'HYBRID_SHARD': ShardingStrategy.HYBRID_SHARD,
+ 'HYBRID_SHARD_ZERO2': ShardingStrategy._HYBRID_SHARD_ZERO2,
+ }[fsdp_cfg.sharding_strategy],
+ backward_prefetch = {
+ 'BACKWARD_PRE': BackwardPrefetch.BACKWARD_PRE,
+ 'BACKWARD_POST': BackwardPrefetch.BACKWARD_POST,
+ }[fsdp_cfg.backward_prefetch],
+ mixed_precision_policy = MixedPrecision(
+ param_dtype=dtype,
+ reduce_dtype=dtype,
+ ),
+ auto_wrap_policy = functools.partial(
+ size_based_auto_wrap_policy, min_num_params=fsdp_cfg.min_num_params
+ ),
+ cpu_offload = CPUOffload(offload_params=fsdp_cfg.cpu_offload),
+ state_dict_type = {
+ 'FULL_STATE_DICT': StateDictType.FULL_STATE_DICT,
+ 'LOCAL_STATE_DICT': StateDictType.LOCAL_STATE_DICT,
+ 'SHARDED_STATE_DICT': StateDictType.SHARDED_STATE_DICT
+ }[fsdp_cfg.state_dict_type],
+ state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
+ optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
+ limit_all_gathers = fsdp_cfg.limit_all_gathers,
+ use_orig_params = fsdp_cfg.use_orig_params,
+ sync_module_states = fsdp_cfg.sync_module_states,
+ forward_prefetch = fsdp_cfg.forward_prefetch,
+ activation_checkpointing = fsdp_cfg.activation_checkpointing,
+ )
+ return fsdp_plugin
+
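+# Usage sketch (assumes an accelerate-based launcher; `cfg.fsdp` is a
+# hypothetical config node carrying the fields read above):
+#   from accelerate import Accelerator
+#   fsdp_plugin = get_fsdp_plugin(cfg.fsdp, mixed_precision="bf16")
+#   accelerator = Accelerator(mixed_precision="bf16", fsdp_plugin=fsdp_plugin)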
+
+def freeze_model(model, trainable_modules={}, verbose=False):
+ logger.info("Start freeze")
+ for name, param in model.named_parameters():
+ param.requires_grad = False
+ if verbose:
+ logger.info("freeze moduel: "+str(name))
+ for trainable_module_name in trainable_modules:
+ if trainable_module_name in name:
+ param.requires_grad = True
+ if verbose:
+ logger.info("unfreeze moduel: "+str(name))
+ break
+ logger.info("End freeze")
+ params_unfreeze = [p.numel() if p.requires_grad else 0 for n, p in model.named_parameters()]
+ params_freeze = [p.numel() if not p.requires_grad else 0 for n, p in model.named_parameters()]
+ logger.info(f"Unfreeze Module Parameters: {sum(params_unfreeze) / 1e6} M")
+ logger.info(f"Freeze Module Parameters: {sum(params_freeze) / 1e6} M")
+ return
+
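+# Example (hypothetical module names): freeze_model(model, trainable_modules={"attn", "adaln"})
+# freezes everything except parameters whose names contain "attn" or "adaln".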
+
+@torch.no_grad()
+def update_ema(ema_model, model, decay=0.9999):
+ """
+ Step the EMA model towards the current model.
+ """
+ if hasattr(model, 'module'):
+ model = model.module
+ if hasattr(ema_model, 'module'):
+ ema_model = ema_model.module
+ ema_params = OrderedDict(ema_model.named_parameters())
+ model_params = OrderedDict(model.named_parameters())
+
+ for name, param in model_params.items():
+ # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
+ ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
+
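+# Typical training-loop sketch (assumed usage; `deepcopy` is imported above):
+#   ema_model = deepcopy(model).eval()
+#   for step, batch in enumerate(loader):
+#       ...
+#       optimizer.step()
+#       update_ema(ema_model, model, decay=0.9999)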
+
+
+def log_validation(model):
+ pass
\ No newline at end of file
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000000000000000000000000000000000000..5d88d0f9f9c0f61b7d536ddbfaee1166e8bff169
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,2599 @@
+version = 1
+revision = 3
+requires-python = ">=3.10"
+resolution-markers = [
+ "python_full_version >= '3.13' and sys_platform == 'linux'",
+ "python_full_version == '3.12.*' and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and sys_platform == 'linux'",
+ "python_full_version >= '3.13' and sys_platform != 'linux'",
+ "python_full_version == '3.12.*' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and sys_platform == 'linux'",
+ "python_full_version < '3.11' and sys_platform != 'linux'",
+]
+
+[[package]]
+name = "accelerate"
+version = "1.10.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "huggingface-hub" },
+ { name = "numpy" },
+ { name = "packaging" },
+ { name = "psutil" },
+ { name = "pyyaml" },
+ { name = "safetensors" },
+ { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b1/72/ff3961c19ee395c3d30ac630ee77bfb0e1b46b87edc504d4f83bb4a89705/accelerate-1.10.1.tar.gz", hash = "sha256:3dea89e433420e4bfac0369cae7e36dcd6a56adfcfd38cdda145c6225eab5df8", size = 392446, upload-time = "2025-08-25T13:57:06.21Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5f/a0/d9ef19f780f319c21ee90ecfef4431cbeeca95bec7f14071785c17b6029b/accelerate-1.10.1-py3-none-any.whl", hash = "sha256:3621cff60b9a27ce798857ece05e2b9f56fcc71631cfb31ccf71f0359c311f11", size = 374909, upload-time = "2025-08-25T13:57:04.55Z" },
+]
+
+[[package]]
+name = "aiofiles"
+version = "24.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247, upload-time = "2024-06-24T11:02:03.584Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896, upload-time = "2024-06-24T11:02:01.529Z" },
+]
+
+[[package]]
+name = "altair"
+version = "5.5.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jinja2" },
+ { name = "jsonschema" },
+ { name = "narwhals" },
+ { name = "packaging" },
+ { name = "typing-extensions", marker = "python_full_version < '3.14'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/16/b1/f2969c7bdb8ad8bbdda031687defdce2c19afba2aa2c8e1d2a17f78376d8/altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d", size = 705305, upload-time = "2024-11-23T23:39:58.542Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" },
+]
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
+]
+
+[[package]]
+name = "antlr4-python3-runtime"
+version = "4.9.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" }
+
+[[package]]
+name = "anyio"
+version = "4.10.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "idna" },
+ { name = "sniffio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
+]
+
+[[package]]
+name = "attrs"
+version = "25.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" },
+]
+
+[[package]]
+name = "audioop-lts"
+version = "0.2.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/38/53/946db57842a50b2da2e0c1e34bd37f36f5aadba1a929a3971c5d7841dbca/audioop_lts-0.2.2.tar.gz", hash = "sha256:64d0c62d88e67b98a1a5e71987b7aa7b5bcffc7dcee65b635823dbdd0a8dbbd0", size = 30686, upload-time = "2025-08-05T16:43:17.409Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/de/d4/94d277ca941de5a507b07f0b592f199c22454eeaec8f008a286b3fbbacd6/audioop_lts-0.2.2-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd3d4602dc64914d462924a08c1a9816435a2155d74f325853c1f1ac3b2d9800", size = 46523, upload-time = "2025-08-05T16:42:20.836Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/5a/656d1c2da4b555920ce4177167bfeb8623d98765594af59702c8873f60ec/audioop_lts-0.2.2-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:550c114a8df0aafe9a05442a1162dfc8fec37e9af1d625ae6060fed6e756f303", size = 27455, upload-time = "2025-08-05T16:42:22.283Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/83/ea581e364ce7b0d41456fb79d6ee0ad482beda61faf0cab20cbd4c63a541/audioop_lts-0.2.2-cp313-abi3-macosx_11_0_arm64.whl", hash = "sha256:9a13dc409f2564de15dd68be65b462ba0dde01b19663720c68c1140c782d1d75", size = 26997, upload-time = "2025-08-05T16:42:23.849Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/3b/e8964210b5e216e5041593b7d33e97ee65967f17c282e8510d19c666dab4/audioop_lts-0.2.2-cp313-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:51c916108c56aa6e426ce611946f901badac950ee2ddaf302b7ed35d9958970d", size = 85844, upload-time = "2025-08-05T16:42:25.208Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2e/0a1c52faf10d51def20531a59ce4c706cb7952323b11709e10de324d6493/audioop_lts-0.2.2-cp313-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47eba38322370347b1c47024defbd36374a211e8dd5b0dcbce7b34fdb6f8847b", size = 85056, upload-time = "2025-08-05T16:42:26.559Z" },
+ { url = "https://files.pythonhosted.org/packages/75/e8/cd95eef479656cb75ab05dfece8c1f8c395d17a7c651d88f8e6e291a63ab/audioop_lts-0.2.2-cp313-abi3-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba7c3a7e5f23e215cb271516197030c32aef2e754252c4c70a50aaff7031a2c8", size = 93892, upload-time = "2025-08-05T16:42:27.902Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/1e/a0c42570b74f83efa5cca34905b3eef03f7ab09fe5637015df538a7f3345/audioop_lts-0.2.2-cp313-abi3-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:def246fe9e180626731b26e89816e79aae2276f825420a07b4a647abaa84becc", size = 96660, upload-time = "2025-08-05T16:42:28.9Z" },
+ { url = "https://files.pythonhosted.org/packages/50/d5/8a0ae607ca07dbb34027bac8db805498ee7bfecc05fd2c148cc1ed7646e7/audioop_lts-0.2.2-cp313-abi3-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e160bf9df356d841bb6c180eeeea1834085464626dc1b68fa4e1d59070affdc3", size = 79143, upload-time = "2025-08-05T16:42:29.929Z" },
+ { url = "https://files.pythonhosted.org/packages/12/17/0d28c46179e7910bfb0bb62760ccb33edb5de973052cb2230b662c14ca2e/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4b4cd51a57b698b2d06cb9993b7ac8dfe89a3b2878e96bc7948e9f19ff51dba6", size = 84313, upload-time = "2025-08-05T16:42:30.949Z" },
+ { url = "https://files.pythonhosted.org/packages/84/ba/bd5d3806641564f2024e97ca98ea8f8811d4e01d9b9f9831474bc9e14f9e/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_ppc64le.whl", hash = "sha256:4a53aa7c16a60a6857e6b0b165261436396ef7293f8b5c9c828a3a203147ed4a", size = 93044, upload-time = "2025-08-05T16:42:31.959Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/5e/435ce8d5642f1f7679540d1e73c1c42d933331c0976eb397d1717d7f01a3/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_riscv64.whl", hash = "sha256:3fc38008969796f0f689f1453722a0f463da1b8a6fbee11987830bfbb664f623", size = 78766, upload-time = "2025-08-05T16:42:33.302Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/3b/b909e76b606cbfd53875693ec8c156e93e15a1366a012f0b7e4fb52d3c34/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_s390x.whl", hash = "sha256:15ab25dd3e620790f40e9ead897f91e79c0d3ce65fe193c8ed6c26cffdd24be7", size = 87640, upload-time = "2025-08-05T16:42:34.854Z" },
+ { url = "https://files.pythonhosted.org/packages/30/e7/8f1603b4572d79b775f2140d7952f200f5e6c62904585d08a01f0a70393a/audioop_lts-0.2.2-cp313-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:03f061a1915538fd96272bac9551841859dbb2e3bf73ebe4a23ef043766f5449", size = 86052, upload-time = "2025-08-05T16:42:35.839Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/96/c37846df657ccdda62ba1ae2b6534fa90e2e1b1742ca8dcf8ebd38c53801/audioop_lts-0.2.2-cp313-abi3-win32.whl", hash = "sha256:3bcddaaf6cc5935a300a8387c99f7a7fbbe212a11568ec6cf6e4bc458c048636", size = 26185, upload-time = "2025-08-05T16:42:37.04Z" },
+ { url = "https://files.pythonhosted.org/packages/34/a5/9d78fdb5b844a83da8a71226c7bdae7cc638861085fff7a1d707cb4823fa/audioop_lts-0.2.2-cp313-abi3-win_amd64.whl", hash = "sha256:a2c2a947fae7d1062ef08c4e369e0ba2086049a5e598fda41122535557012e9e", size = 30503, upload-time = "2025-08-05T16:42:38.427Z" },
+ { url = "https://files.pythonhosted.org/packages/34/25/20d8fde083123e90c61b51afb547bb0ea7e77bab50d98c0ab243d02a0e43/audioop_lts-0.2.2-cp313-abi3-win_arm64.whl", hash = "sha256:5f93a5db13927a37d2d09637ccca4b2b6b48c19cd9eda7b17a2e9f77edee6a6f", size = 24173, upload-time = "2025-08-05T16:42:39.704Z" },
+ { url = "https://files.pythonhosted.org/packages/58/a7/0a764f77b5c4ac58dc13c01a580f5d32ae8c74c92020b961556a43e26d02/audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:73f80bf4cd5d2ca7814da30a120de1f9408ee0619cc75da87d0641273d202a09", size = 47096, upload-time = "2025-08-05T16:42:40.684Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/ed/ebebedde1a18848b085ad0fa54b66ceb95f1f94a3fc04f1cd1b5ccb0ed42/audioop_lts-0.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:106753a83a25ee4d6f473f2be6b0966fc1c9af7e0017192f5531a3e7463dce58", size = 27748, upload-time = "2025-08-05T16:42:41.992Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/6e/11ca8c21af79f15dbb1c7f8017952ee8c810c438ce4e2b25638dfef2b02c/audioop_lts-0.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fbdd522624141e40948ab3e8cdae6e04c748d78710e9f0f8d4dae2750831de19", size = 27329, upload-time = "2025-08-05T16:42:42.987Z" },
+ { url = "https://files.pythonhosted.org/packages/84/52/0022f93d56d85eec5da6b9da6a958a1ef09e80c39f2cc0a590c6af81dcbb/audioop_lts-0.2.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:143fad0311e8209ece30a8dbddab3b65ab419cbe8c0dde6e8828da25999be911", size = 92407, upload-time = "2025-08-05T16:42:44.336Z" },
+ { url = "https://files.pythonhosted.org/packages/87/1d/48a889855e67be8718adbc7a01f3c01d5743c325453a5e81cf3717664aad/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dfbbc74ec68a0fd08cfec1f4b5e8cca3d3cd7de5501b01c4b5d209995033cde9", size = 91811, upload-time = "2025-08-05T16:42:45.325Z" },
+ { url = "https://files.pythonhosted.org/packages/98/a6/94b7213190e8077547ffae75e13ed05edc488653c85aa5c41472c297d295/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cfcac6aa6f42397471e4943e0feb2244549db5c5d01efcd02725b96af417f3fe", size = 100470, upload-time = "2025-08-05T16:42:46.468Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/e9/78450d7cb921ede0cfc33426d3a8023a3bda755883c95c868ee36db8d48d/audioop_lts-0.2.2-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:752d76472d9804ac60f0078c79cdae8b956f293177acd2316cd1e15149aee132", size = 103878, upload-time = "2025-08-05T16:42:47.576Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/e2/cd5439aad4f3e34ae1ee852025dc6aa8f67a82b97641e390bf7bd9891d3e/audioop_lts-0.2.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:83c381767e2cc10e93e40281a04852facc4cd9334550e0f392f72d1c0a9c5753", size = 84867, upload-time = "2025-08-05T16:42:49.003Z" },
+ { url = "https://files.pythonhosted.org/packages/68/4b/9d853e9076c43ebba0d411e8d2aa19061083349ac695a7d082540bad64d0/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c0022283e9556e0f3643b7c3c03f05063ca72b3063291834cca43234f20c60bb", size = 90001, upload-time = "2025-08-05T16:42:50.038Z" },
+ { url = "https://files.pythonhosted.org/packages/58/26/4bae7f9d2f116ed5593989d0e521d679b0d583973d203384679323d8fa85/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a2d4f1513d63c795e82948e1305f31a6d530626e5f9f2605408b300ae6095093", size = 99046, upload-time = "2025-08-05T16:42:51.111Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/67/a9f4fb3e250dda9e9046f8866e9fa7d52664f8985e445c6b4ad6dfb55641/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:c9c8e68d8b4a56fda8c025e538e639f8c5953f5073886b596c93ec9b620055e7", size = 84788, upload-time = "2025-08-05T16:42:52.198Z" },
+ { url = "https://files.pythonhosted.org/packages/70/f7/3de86562db0121956148bcb0fe5b506615e3bcf6e63c4357a612b910765a/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:96f19de485a2925314f5020e85911fb447ff5fbef56e8c7c6927851b95533a1c", size = 94472, upload-time = "2025-08-05T16:42:53.59Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/32/fd772bf9078ae1001207d2df1eef3da05bea611a87dd0e8217989b2848fa/audioop_lts-0.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e541c3ef484852ef36545f66209444c48b28661e864ccadb29daddb6a4b8e5f5", size = 92279, upload-time = "2025-08-05T16:42:54.632Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/41/affea7181592ab0ab560044632571a38edaf9130b84928177823fbf3176a/audioop_lts-0.2.2-cp313-cp313t-win32.whl", hash = "sha256:d5e73fa573e273e4f2e5ff96f9043858a5e9311e94ffefd88a3186a910c70917", size = 26568, upload-time = "2025-08-05T16:42:55.627Z" },
+ { url = "https://files.pythonhosted.org/packages/28/2b/0372842877016641db8fc54d5c88596b542eec2f8f6c20a36fb6612bf9ee/audioop_lts-0.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9191d68659eda01e448188f60364c7763a7ca6653ed3f87ebb165822153a8547", size = 30942, upload-time = "2025-08-05T16:42:56.674Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/ca/baf2b9cc7e96c179bb4a54f30fcd83e6ecb340031bde68f486403f943768/audioop_lts-0.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:c174e322bb5783c099aaf87faeb240c8d210686b04bd61dfd05a8e5a83d88969", size = 24603, upload-time = "2025-08-05T16:42:57.571Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/73/413b5a2804091e2c7d5def1d618e4837f1cb82464e230f827226278556b7/audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f9ee9b52f5f857fbaf9d605a360884f034c92c1c23021fb90b2e39b8e64bede6", size = 47104, upload-time = "2025-08-05T16:42:58.518Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/8c/daa3308dc6593944410c2c68306a5e217f5c05b70a12e70228e7dd42dc5c/audioop_lts-0.2.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:49ee1a41738a23e98d98b937a0638357a2477bc99e61b0f768a8f654f45d9b7a", size = 27754, upload-time = "2025-08-05T16:43:00.132Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/86/c2e0f627168fcf61781a8f72cab06b228fe1da4b9fa4ab39cfb791b5836b/audioop_lts-0.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5b00be98ccd0fc123dcfad31d50030d25fcf31488cde9e61692029cd7394733b", size = 27332, upload-time = "2025-08-05T16:43:01.666Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/bd/35dce665255434f54e5307de39e31912a6f902d4572da7c37582809de14f/audioop_lts-0.2.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a6d2e0f9f7a69403e388894d4ca5ada5c47230716a03f2847cfc7bd1ecb589d6", size = 92396, upload-time = "2025-08-05T16:43:02.991Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/d2/deeb9f51def1437b3afa35aeb729d577c04bcd89394cb56f9239a9f50b6f/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9b0b8a03ef474f56d1a842af1a2e01398b8f7654009823c6d9e0ecff4d5cfbf", size = 91811, upload-time = "2025-08-05T16:43:04.096Z" },
+ { url = "https://files.pythonhosted.org/packages/76/3b/09f8b35b227cee28cc8231e296a82759ed80c1a08e349811d69773c48426/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2b267b70747d82125f1a021506565bdc5609a2b24bcb4773c16d79d2bb260bbd", size = 100483, upload-time = "2025-08-05T16:43:05.085Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/15/05b48a935cf3b130c248bfdbdea71ce6437f5394ee8533e0edd7cfd93d5e/audioop_lts-0.2.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0337d658f9b81f4cd0fdb1f47635070cc084871a3d4646d9de74fdf4e7c3d24a", size = 103885, upload-time = "2025-08-05T16:43:06.197Z" },
+ { url = "https://files.pythonhosted.org/packages/83/80/186b7fce6d35b68d3d739f228dc31d60b3412105854edb975aa155a58339/audioop_lts-0.2.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:167d3b62586faef8b6b2275c3218796b12621a60e43f7e9d5845d627b9c9b80e", size = 84899, upload-time = "2025-08-05T16:43:07.291Z" },
+ { url = "https://files.pythonhosted.org/packages/49/89/c78cc5ac6cb5828f17514fb12966e299c850bc885e80f8ad94e38d450886/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0d9385e96f9f6da847f4d571ce3cb15b5091140edf3db97276872647ce37efd7", size = 89998, upload-time = "2025-08-05T16:43:08.335Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/4b/6401888d0c010e586c2ca50fce4c903d70a6bb55928b16cfbdfd957a13da/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:48159d96962674eccdca9a3df280e864e8ac75e40a577cc97c5c42667ffabfc5", size = 99046, upload-time = "2025-08-05T16:43:09.367Z" },
+ { url = "https://files.pythonhosted.org/packages/de/f8/c874ca9bb447dae0e2ef2e231f6c4c2b0c39e31ae684d2420b0f9e97ee68/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:8fefe5868cd082db1186f2837d64cfbfa78b548ea0d0543e9b28935ccce81ce9", size = 84843, upload-time = "2025-08-05T16:43:10.749Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/c0/0323e66f3daebc13fd46b36b30c3be47e3fc4257eae44f1e77eb828c703f/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:58cf54380c3884fb49fdd37dfb7a772632b6701d28edd3e2904743c5e1773602", size = 94490, upload-time = "2025-08-05T16:43:12.131Z" },
+ { url = "https://files.pythonhosted.org/packages/98/6b/acc7734ac02d95ab791c10c3f17ffa3584ccb9ac5c18fd771c638ed6d1f5/audioop_lts-0.2.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:088327f00488cdeed296edd9215ca159f3a5a5034741465789cad403fcf4bec0", size = 92297, upload-time = "2025-08-05T16:43:13.139Z" },
+ { url = "https://files.pythonhosted.org/packages/13/c3/c3dc3f564ce6877ecd2a05f8d751b9b27a8c320c2533a98b0c86349778d0/audioop_lts-0.2.2-cp314-cp314t-win32.whl", hash = "sha256:068aa17a38b4e0e7de771c62c60bbca2455924b67a8814f3b0dee92b5820c0b3", size = 27331, upload-time = "2025-08-05T16:43:14.19Z" },
+ { url = "https://files.pythonhosted.org/packages/72/bb/b4608537e9ffcb86449091939d52d24a055216a36a8bf66b936af8c3e7ac/audioop_lts-0.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:a5bf613e96f49712073de86f20dbdd4014ca18efd4d34ed18c75bd808337851b", size = 31697, upload-time = "2025-08-05T16:43:15.193Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/22/91616fe707a5c5510de2cac9b046a30defe7007ba8a0c04f9c08f27df312/audioop_lts-0.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:b492c3b040153e68b9fdaff5913305aaaba5bb433d8a7f73d5cf6a64ed3cc1dd", size = 25206, upload-time = "2025-08-05T16:43:16.444Z" },
+]
+
+[[package]]
+name = "bitsandbytes"
+version = "0.47.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy" },
+ { name = "torch" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/aa/eb/477d6b5602f469c7305fd43eec71d890c39909f615c1d7138f6e7d226eff/bitsandbytes-0.47.0-py3-none-manylinux_2_24_aarch64.whl", hash = "sha256:2f805b76891a596025e9e13318b675d08481b9ee650d65e5d2f9d844084c6521", size = 30004641, upload-time = "2025-08-11T18:51:20.524Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/40/91f1a5a694f434bc13cba160045fdc4e867032e627b001bf411048fefd9c/bitsandbytes-0.47.0-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:68f3fffd494a47ed1fd7593bfc5dd2ac69b68260599b71b4c4b3a32f90f3b184", size = 61284639, upload-time = "2025-08-11T18:51:23.581Z" },
+ { url = "https://files.pythonhosted.org/packages/18/a9/e07a227f1cd6562844cea2f05ee576b0991a9a91f45965c06034178ba0f6/bitsandbytes-0.47.0-py3-none-win_amd64.whl", hash = "sha256:4880a6d42ca9628b5a571c8cc3093dc3f5f52511e5a9e47d52d569807975531a", size = 60725121, upload-time = "2025-08-11T18:51:27.543Z" },
+]
+
+[[package]]
+name = "blinker"
+version = "1.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" },
+]
+
+[[package]]
+name = "brotli"
+version = "1.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2f/c2/f9e977608bdf958650638c3f1e28f85a1b075f075ebbe77db8555463787b/Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724", size = 7372270, upload-time = "2023-09-07T14:05:41.643Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6d/3a/dbf4fb970c1019a57b5e492e1e0eae745d32e59ba4d6161ab5422b08eefe/Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752", size = 873045, upload-time = "2023-09-07T14:03:16.894Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/11/afc14026ea7f44bd6eb9316d800d439d092c8d508752055ce8d03086079a/Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9", size = 446218, upload-time = "2023-09-07T14:03:18.917Z" },
+ { url = "https://files.pythonhosted.org/packages/36/83/7545a6e7729db43cb36c4287ae388d6885c85a86dd251768a47015dfde32/Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3", size = 2903872, upload-time = "2023-09-07T14:03:20.398Z" },
+ { url = "https://files.pythonhosted.org/packages/32/23/35331c4d9391fcc0f29fd9bec2c76e4b4eeab769afbc4b11dd2e1098fb13/Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d", size = 2941254, upload-time = "2023-09-07T14:03:21.914Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/24/1671acb450c902edb64bd765d73603797c6c7280a9ada85a195f6b78c6e5/Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e", size = 2857293, upload-time = "2023-09-07T14:03:24Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/00/40f760cc27007912b327fe15bf6bfd8eaecbe451687f72a8abc587d503b3/Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da", size = 3002385, upload-time = "2023-09-07T14:03:26.248Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/cb/8aaa83f7a4caa131757668c0fb0c4b6384b09ffa77f2fba9570d87ab587d/Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80", size = 2911104, upload-time = "2023-09-07T14:03:27.849Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/c4/65456561d89d3c49f46b7fbeb8fe6e449f13bdc8ea7791832c5d476b2faf/Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d", size = 2809981, upload-time = "2023-09-07T14:03:29.92Z" },
+ { url = "https://files.pythonhosted.org/packages/05/1b/cf49528437bae28abce5f6e059f0d0be6fecdcc1d3e33e7c54b3ca498425/Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0", size = 2935297, upload-time = "2023-09-07T14:03:32.035Z" },
+ { url = "https://files.pythonhosted.org/packages/81/ff/190d4af610680bf0c5a09eb5d1eac6e99c7c8e216440f9c7cfd42b7adab5/Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e", size = 2930735, upload-time = "2023-09-07T14:03:33.801Z" },
+ { url = "https://files.pythonhosted.org/packages/80/7d/f1abbc0c98f6e09abd3cad63ec34af17abc4c44f308a7a539010f79aae7a/Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c", size = 2933107, upload-time = "2024-10-18T12:32:09.016Z" },
+ { url = "https://files.pythonhosted.org/packages/34/ce/5a5020ba48f2b5a4ad1c0522d095ad5847a0be508e7d7569c8630ce25062/Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1", size = 2845400, upload-time = "2024-10-18T12:32:11.134Z" },
+ { url = "https://files.pythonhosted.org/packages/44/89/fa2c4355ab1eecf3994e5a0a7f5492c6ff81dfcb5f9ba7859bd534bb5c1a/Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2", size = 3031985, upload-time = "2024-10-18T12:32:12.813Z" },
+ { url = "https://files.pythonhosted.org/packages/af/a4/79196b4a1674143d19dca400866b1a4d1a089040df7b93b88ebae81f3447/Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec", size = 2927099, upload-time = "2024-10-18T12:32:14.733Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/54/1c0278556a097f9651e657b873ab08f01b9a9ae4cac128ceb66427d7cd20/Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2", size = 333172, upload-time = "2023-09-07T14:03:35.212Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/65/b785722e941193fd8b571afd9edbec2a9b838ddec4375d8af33a50b8dab9/Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128", size = 357255, upload-time = "2023-09-07T14:03:36.447Z" },
+ { url = "https://files.pythonhosted.org/packages/96/12/ad41e7fadd5db55459c4c401842b47f7fee51068f86dd2894dd0dcfc2d2a/Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc", size = 873068, upload-time = "2023-09-07T14:03:37.779Z" },
+ { url = "https://files.pythonhosted.org/packages/95/4e/5afab7b2b4b61a84e9c75b17814198ce515343a44e2ed4488fac314cd0a9/Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6", size = 446244, upload-time = "2023-09-07T14:03:39.223Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/e6/f305eb61fb9a8580c525478a4a34c5ae1a9bcb12c3aee619114940bc513d/Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd", size = 2906500, upload-time = "2023-09-07T14:03:40.858Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/4f/af6846cfbc1550a3024e5d3775ede1e00474c40882c7bf5b37a43ca35e91/Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf", size = 2943950, upload-time = "2023-09-07T14:03:42.896Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/e7/ca2993c7682d8629b62630ebf0d1f3bb3d579e667ce8e7ca03a0a0576a2d/Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61", size = 2918527, upload-time = "2023-09-07T14:03:44.552Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/96/da98e7bedc4c51104d29cc61e5f449a502dd3dbc211944546a4cc65500d3/Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327", size = 2845489, upload-time = "2023-09-07T14:03:46.594Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/ef/ccbc16947d6ce943a7f57e1a40596c75859eeb6d279c6994eddd69615265/Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd", size = 2914080, upload-time = "2023-09-07T14:03:48.204Z" },
+ { url = "https://files.pythonhosted.org/packages/80/d6/0bd38d758d1afa62a5524172f0b18626bb2392d717ff94806f741fcd5ee9/Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9", size = 2813051, upload-time = "2023-09-07T14:03:50.348Z" },
+ { url = "https://files.pythonhosted.org/packages/14/56/48859dd5d129d7519e001f06dcfbb6e2cf6db92b2702c0c2ce7d97e086c1/Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265", size = 2938172, upload-time = "2023-09-07T14:03:52.395Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/77/a236d5f8cd9e9f4348da5acc75ab032ab1ab2c03cc8f430d24eea2672888/Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8", size = 2933023, upload-time = "2023-09-07T14:03:53.96Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/87/3b283efc0f5cb35f7f84c0c240b1e1a1003a5e47141a4881bf87c86d0ce2/Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f", size = 2935871, upload-time = "2024-10-18T12:32:16.688Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/eb/2be4cc3e2141dc1a43ad4ca1875a72088229de38c68e842746b342667b2a/Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757", size = 2847784, upload-time = "2024-10-18T12:32:18.459Z" },
+ { url = "https://files.pythonhosted.org/packages/66/13/b58ddebfd35edde572ccefe6890cf7c493f0c319aad2a5badee134b4d8ec/Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0", size = 3034905, upload-time = "2024-10-18T12:32:20.192Z" },
+ { url = "https://files.pythonhosted.org/packages/84/9c/bc96b6c7db824998a49ed3b38e441a2cae9234da6fa11f6ed17e8cf4f147/Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b", size = 2929467, upload-time = "2024-10-18T12:32:21.774Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/71/8f161dee223c7ff7fea9d44893fba953ce97cf2c3c33f78ba260a91bcff5/Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50", size = 333169, upload-time = "2023-09-07T14:03:55.404Z" },
+ { url = "https://files.pythonhosted.org/packages/02/8a/fece0ee1057643cb2a5bbf59682de13f1725f8482b2c057d4e799d7ade75/Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1", size = 357253, upload-time = "2023-09-07T14:03:56.643Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/d0/5373ae13b93fe00095a58efcbce837fd470ca39f703a235d2a999baadfbc/Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28", size = 815693, upload-time = "2024-10-18T12:32:23.824Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/48/f6e1cdf86751300c288c1459724bfa6917a80e30dbfc326f92cea5d3683a/Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f", size = 422489, upload-time = "2024-10-18T12:32:25.641Z" },
+ { url = "https://files.pythonhosted.org/packages/06/88/564958cedce636d0f1bed313381dfc4b4e3d3f6015a63dae6146e1b8c65c/Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409", size = 873081, upload-time = "2023-09-07T14:03:57.967Z" },
+ { url = "https://files.pythonhosted.org/packages/58/79/b7026a8bb65da9a6bb7d14329fd2bd48d2b7f86d7329d5cc8ddc6a90526f/Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2", size = 446244, upload-time = "2023-09-07T14:03:59.319Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/18/c18c32ecea41b6c0004e15606e274006366fe19436b6adccc1ae7b2e50c2/Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451", size = 2906505, upload-time = "2023-09-07T14:04:01.327Z" },
+ { url = "https://files.pythonhosted.org/packages/08/c8/69ec0496b1ada7569b62d85893d928e865df29b90736558d6c98c2031208/Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91", size = 2944152, upload-time = "2023-09-07T14:04:03.033Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/fb/0517cea182219d6768113a38167ef6d4eb157a033178cc938033a552ed6d/Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408", size = 2919252, upload-time = "2023-09-07T14:04:04.675Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/53/73a3431662e33ae61a5c80b1b9d2d18f58dfa910ae8dd696e57d39f1a2f5/Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0", size = 2845955, upload-time = "2023-09-07T14:04:06.585Z" },
+ { url = "https://files.pythonhosted.org/packages/55/ac/bd280708d9c5ebdbf9de01459e625a3e3803cce0784f47d633562cf40e83/Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc", size = 2914304, upload-time = "2023-09-07T14:04:08.668Z" },
+ { url = "https://files.pythonhosted.org/packages/76/58/5c391b41ecfc4527d2cc3350719b02e87cb424ef8ba2023fb662f9bf743c/Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180", size = 2814452, upload-time = "2023-09-07T14:04:10.736Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/4e/91b8256dfe99c407f174924b65a01f5305e303f486cc7a2e8a5d43c8bec3/Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248", size = 2938751, upload-time = "2023-09-07T14:04:12.875Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/a6/e2a39a5d3b412938362bbbeba5af904092bf3f95b867b4a3eb856104074e/Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966", size = 2933757, upload-time = "2023-09-07T14:04:14.551Z" },
+ { url = "https://files.pythonhosted.org/packages/13/f0/358354786280a509482e0e77c1a5459e439766597d280f28cb097642fc26/Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9", size = 2936146, upload-time = "2024-10-18T12:32:27.257Z" },
+ { url = "https://files.pythonhosted.org/packages/80/f7/daf538c1060d3a88266b80ecc1d1c98b79553b3f117a485653f17070ea2a/Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb", size = 2848055, upload-time = "2024-10-18T12:32:29.376Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/cf/0eaa0585c4077d3c2d1edf322d8e97aabf317941d3a72d7b3ad8bce004b0/Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111", size = 3035102, upload-time = "2024-10-18T12:32:31.371Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/63/1c1585b2aa554fe6dbce30f0c18bdbc877fa9a1bf5ff17677d9cca0ac122/Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839", size = 2930029, upload-time = "2024-10-18T12:32:33.293Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/3b/4e3fd1893eb3bbfef8e5a80d4508bec17a57bb92d586c85c12d28666bb13/Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0", size = 333276, upload-time = "2023-09-07T14:04:16.49Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/d5/942051b45a9e883b5b6e98c041698b1eb2012d25e5948c58d6bf85b1bb43/Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951", size = 357255, upload-time = "2023-09-07T14:04:17.83Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/9f/fb37bb8ffc52a8da37b1c03c459a8cd55df7a57bdccd8831d500e994a0ca/Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5", size = 815681, upload-time = "2024-10-18T12:32:34.942Z" },
+ { url = "https://files.pythonhosted.org/packages/06/b3/dbd332a988586fefb0aa49c779f59f47cae76855c2d00f450364bb574cac/Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8", size = 422475, upload-time = "2024-10-18T12:32:36.485Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/80/6aaddc2f63dbcf2d93c2d204e49c11a9ec93a8c7c63261e2b4bd35198283/Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f", size = 2906173, upload-time = "2024-10-18T12:32:37.978Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/1d/e6ca79c96ff5b641df6097d299347507d39a9604bde8915e76bf026d6c77/Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648", size = 2943803, upload-time = "2024-10-18T12:32:39.606Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/a3/d98d2472e0130b7dd3acdbb7f390d478123dbf62b7d32bda5c830a96116d/Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0", size = 2918946, upload-time = "2024-10-18T12:32:41.679Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/a5/c69e6d272aee3e1423ed005d8915a7eaa0384c7de503da987f2d224d0721/Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089", size = 2845707, upload-time = "2024-10-18T12:32:43.478Z" },
+ { url = "https://files.pythonhosted.org/packages/58/9f/4149d38b52725afa39067350696c09526de0125ebfbaab5acc5af28b42ea/Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368", size = 2936231, upload-time = "2024-10-18T12:32:45.224Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/5a/145de884285611838a16bebfdb060c231c52b8f84dfbe52b852a15780386/Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c", size = 2848157, upload-time = "2024-10-18T12:32:46.894Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ae/408b6bfb8525dadebd3b3dd5b19d631da4f7d46420321db44cd99dcf2f2c/Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284", size = 3035122, upload-time = "2024-10-18T12:32:48.844Z" },
+ { url = "https://files.pythonhosted.org/packages/af/85/a94e5cfaa0ca449d8f91c3d6f78313ebf919a0dbd55a100c711c6e9655bc/Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7", size = 2930206, upload-time = "2024-10-18T12:32:51.198Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/f0/a61d9262cd01351df22e57ad7c34f66794709acab13f34be2675f45bf89d/Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0", size = 333804, upload-time = "2024-10-18T12:32:52.661Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/c1/ec214e9c94000d1c1974ec67ced1c970c148aa6b8d8373066123fc3dbf06/Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b", size = 358517, upload-time = "2024-10-18T12:32:54.066Z" },
+]
+
+[[package]]
+name = "cachetools"
+version = "6.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/9d/61/e4fad8155db4a04bfb4734c7c8ff0882f078f24294d42798b3568eb63bff/cachetools-6.2.0.tar.gz", hash = "sha256:38b328c0889450f05f5e120f56ab68c8abaf424e1275522b138ffc93253f7e32", size = 30988, upload-time = "2025-08-25T18:57:30.924Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6c/56/3124f61d37a7a4e7cc96afc5492c78ba0cb551151e530b54669ddd1436ef/cachetools-6.2.0-py3-none-any.whl", hash = "sha256:1c76a8960c0041fcc21097e357f882197c79da0dbff766e7317890a65d7d8ba6", size = 11276, upload-time = "2025-08-25T18:57:29.684Z" },
+]
+
+[[package]]
+name = "certifi"
+version = "2025.8.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d6/98/f3b8013223728a99b908c9344da3aa04ee6e3fa235f19409033eda92fb78/charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72", size = 207695, upload-time = "2025-08-09T07:55:36.452Z" },
+ { url = "https://files.pythonhosted.org/packages/21/40/5188be1e3118c82dcb7c2a5ba101b783822cfb413a0268ed3be0468532de/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe", size = 147153, upload-time = "2025-08-09T07:55:38.467Z" },
+ { url = "https://files.pythonhosted.org/packages/37/60/5d0d74bc1e1380f0b72c327948d9c2aca14b46a9efd87604e724260f384c/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601", size = 160428, upload-time = "2025-08-09T07:55:40.072Z" },
+ { url = "https://files.pythonhosted.org/packages/85/9a/d891f63722d9158688de58d050c59dc3da560ea7f04f4c53e769de5140f5/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c", size = 157627, upload-time = "2025-08-09T07:55:41.706Z" },
+ { url = "https://files.pythonhosted.org/packages/65/1a/7425c952944a6521a9cfa7e675343f83fd82085b8af2b1373a2409c683dc/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2", size = 152388, upload-time = "2025-08-09T07:55:43.262Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/c9/a2c9c2a355a8594ce2446085e2ec97fd44d323c684ff32042e2a6b718e1d/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0", size = 150077, upload-time = "2025-08-09T07:55:44.903Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/38/20a1f44e4851aa1c9105d6e7110c9d020e093dfa5836d712a5f074a12bf7/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0", size = 161631, upload-time = "2025-08-09T07:55:46.346Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/fa/384d2c0f57edad03d7bec3ebefb462090d8905b4ff5a2d2525f3bb711fac/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0", size = 159210, upload-time = "2025-08-09T07:55:47.539Z" },
+ { url = "https://files.pythonhosted.org/packages/33/9e/eca49d35867ca2db336b6ca27617deed4653b97ebf45dfc21311ce473c37/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a", size = 153739, upload-time = "2025-08-09T07:55:48.744Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/91/26c3036e62dfe8de8061182d33be5025e2424002125c9500faff74a6735e/charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f", size = 99825, upload-time = "2025-08-09T07:55:50.305Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/c6/f05db471f81af1fa01839d44ae2a8bfeec8d2a8b4590f16c4e7393afd323/charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669", size = 107452, upload-time = "2025-08-09T07:55:51.461Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/b5/991245018615474a60965a7c9cd2b4efbaabd16d582a5547c47ee1c7730b/charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b", size = 204483, upload-time = "2025-08-09T07:55:53.12Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2a/ae245c41c06299ec18262825c1569c5d3298fc920e4ddf56ab011b417efd/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64", size = 145520, upload-time = "2025-08-09T07:55:54.712Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/a4/b3b6c76e7a635748c4421d2b92c7b8f90a432f98bda5082049af37ffc8e3/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91", size = 158876, upload-time = "2025-08-09T07:55:56.024Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/e6/63bb0e10f90a8243c5def74b5b105b3bbbfb3e7bb753915fe333fb0c11ea/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f", size = 156083, upload-time = "2025-08-09T07:55:57.582Z" },
+ { url = "https://files.pythonhosted.org/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07", size = 150295, upload-time = "2025-08-09T07:55:59.147Z" },
+ { url = "https://files.pythonhosted.org/packages/61/f1/190d9977e0084d3f1dc169acd060d479bbbc71b90bf3e7bf7b9927dec3eb/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30", size = 148379, upload-time = "2025-08-09T07:56:00.364Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/92/27dbe365d34c68cfe0ca76f1edd70e8705d82b378cb54ebbaeabc2e3029d/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14", size = 160018, upload-time = "2025-08-09T07:56:01.678Z" },
+ { url = "https://files.pythonhosted.org/packages/99/04/baae2a1ea1893a01635d475b9261c889a18fd48393634b6270827869fa34/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c", size = 157430, upload-time = "2025-08-09T07:56:02.87Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/36/77da9c6a328c54d17b960c89eccacfab8271fdaaa228305330915b88afa9/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae", size = 151600, upload-time = "2025-08-09T07:56:04.089Z" },
+ { url = "https://files.pythonhosted.org/packages/64/d4/9eb4ff2c167edbbf08cdd28e19078bf195762e9bd63371689cab5ecd3d0d/charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849", size = 99616, upload-time = "2025-08-09T07:56:05.658Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/9c/996a4a028222e7761a96634d1820de8a744ff4327a00ada9c8942033089b/charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c", size = 107108, upload-time = "2025-08-09T07:56:07.176Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" },
+ { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" },
+ { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" },
+ { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" },
+ { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" },
+ { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" },
+ { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" },
+ { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" },
+ { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" },
+ { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" },
+ { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" },
+ { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" },
+ { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" },
+ { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" },
+ { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" },
+ { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" },
+]
+
+[[package]]
+name = "click"
+version = "8.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "decorator"
+version = "4.4.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/da/93/84fa12f2dc341f8cf5f022ee09e109961055749df2d0c75c5f98746cfe6c/decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7", size = 33629, upload-time = "2020-02-29T05:24:43.312Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ed/1b/72a1821152d07cf1d8b6fce298aeb06a7eb90f4d6d41acec9861e7cc6df0/decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760", size = 9239, upload-time = "2020-02-29T05:24:45.993Z" },
+]
+
+[[package]]
+name = "diffusers"
+version = "0.33.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "huggingface-hub" },
+ { name = "importlib-metadata" },
+ { name = "numpy" },
+ { name = "pillow" },
+ { name = "regex" },
+ { name = "requests" },
+ { name = "safetensors" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/82/cc/1ef6bdc99d3864f6d1ee11bdbe3708b9d33ce35e7671557f641897480956/diffusers-0.33.1.tar.gz", hash = "sha256:fc7f140295d2ec82b1e7474b77bb7057fc0686c14eadc54ca0e52a66527e18a2", size = 2896103, upload-time = "2025-04-10T05:20:29.135Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e7/7a/f08f610cea8a3395ad3b4f586db23bedb43c68db6c3261145a15e7b63126/diffusers-0.33.1-py3-none-any.whl", hash = "sha256:027469e74f289338eb24127409f8d60d840b1b7ce4b27ffcd3134fd3b8431567", size = 3554612, upload-time = "2025-04-10T05:20:24.774Z" },
+]
+
+[[package]]
+name = "einops"
+version = "0.8.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e5/81/df4fbe24dff8ba3934af99044188e20a98ed441ad17a274539b74e82e126/einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84", size = 54805, upload-time = "2025-02-09T03:17:00.434Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/87/62/9773de14fe6c45c23649e98b83231fffd7b9892b6cf863251dc2afa73643/einops-0.8.1-py3-none-any.whl", hash = "sha256:919387eb55330f5757c6bea9165c5ff5cfe63a642682ea788a6d472576d81737", size = 64359, upload-time = "2025-02-09T03:17:01.998Z" },
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" },
+]
+
+[[package]]
+name = "fastapi"
+version = "0.116.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pydantic" },
+ { name = "starlette" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" },
+]
+
+[[package]]
+name = "ffmpy"
+version = "0.6.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0b/f6/67cadf1686030be511004e75fa1c1397f8f193cd4d15d4788edef7c28621/ffmpy-0.6.1.tar.gz", hash = "sha256:b5830fd05f72bace05b8fb28724d54a7a63c5119d7f74ca36a75df33f749142d", size = 4958, upload-time = "2025-07-22T12:08:22.276Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/74/d4/1806897b31c480efc4e97c22506ac46c716084f573aef780bb7fb7a16e8a/ffmpy-0.6.1-py3-none-any.whl", hash = "sha256:69a37e2d7d6feb840e233d5640f3499a8b0a8657336774c86e4c52a3219222d4", size = 5512, upload-time = "2025-07-22T12:08:21.176Z" },
+]
+
+[[package]]
+name = "filelock"
+version = "3.19.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" },
+]
+
+[[package]]
+name = "flash-attn"
+version = "2.8.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "einops" },
+ { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3b/b2/8d76c41ad7974ee264754709c22963447f7f8134613fd9ce80984ed0dab7/flash_attn-2.8.3.tar.gz", hash = "sha256:1e71dd64a9e0280e0447b8a0c2541bad4bf6ac65bdeaa2f90e51a9e57de0370d", size = 8447812, upload-time = "2025-08-15T08:28:12.911Z" }
+
+[[package]]
+name = "fsspec"
+version = "2025.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/de/e0/bab50af11c2d75c9c4a2a26a5254573c0bd97cea152254401510950486fa/fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19", size = 304847, upload-time = "2025-09-02T19:10:49.215Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" },
+]
+
+[[package]]
+name = "gitdb"
+version = "4.0.12"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "smmap" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" },
+]
+
+[[package]]
+name = "gitpython"
+version = "3.1.45"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "gitdb" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" },
+]
+
+[[package]]
+name = "gradio"
+version = "5.44.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "aiofiles" },
+ { name = "anyio" },
+ { name = "audioop-lts", marker = "python_full_version >= '3.13'" },
+ { name = "brotli" },
+ { name = "fastapi" },
+ { name = "ffmpy" },
+ { name = "gradio-client" },
+ { name = "groovy" },
+ { name = "httpx" },
+ { name = "huggingface-hub" },
+ { name = "jinja2" },
+ { name = "markupsafe" },
+ { name = "numpy" },
+ { name = "orjson" },
+ { name = "packaging" },
+ { name = "pandas" },
+ { name = "pillow" },
+ { name = "pydantic" },
+ { name = "pydub" },
+ { name = "python-multipart" },
+ { name = "pyyaml" },
+ { name = "ruff", marker = "sys_platform != 'emscripten'" },
+ { name = "safehttpx" },
+ { name = "semantic-version" },
+ { name = "starlette", marker = "sys_platform != 'emscripten'" },
+ { name = "tomlkit" },
+ { name = "typer", marker = "sys_platform != 'emscripten'" },
+ { name = "typing-extensions" },
+ { name = "urllib3", marker = "sys_platform == 'emscripten'" },
+ { name = "uvicorn", marker = "sys_platform != 'emscripten'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/18/ea/a9b3ded7913ca22eda86a56ddae8f7523963eb58ae60606015c9466f1950/gradio-5.44.1.tar.gz", hash = "sha256:8527837aa6de4b0d2398dab11baac8e3eac9da69140ed0da6efc6ac497fa818d", size = 72141502, upload-time = "2025-08-29T00:09:45.678Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e1/2f/8c2f8822217061888b68a8610c556d053983fe8759273c9a6fcf3f2fabca/gradio-5.44.1-py3-none-any.whl", hash = "sha256:cb22dd519c3bb2f8c7960cdcc23ca3b869511c85e320f486d7aef6e3627f97b9", size = 60195018, upload-time = "2025-08-29T00:09:40.879Z" },
+]
+
+[[package]]
+name = "gradio-client"
+version = "1.12.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "fsspec" },
+ { name = "httpx" },
+ { name = "huggingface-hub" },
+ { name = "packaging" },
+ { name = "typing-extensions" },
+ { name = "websockets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6f/67/b3a7cba3ec31eb00c6fcb4d6df6cce94dd9a4fbc5ae3eb9b20f18e1c1040/gradio_client-1.12.1.tar.gz", hash = "sha256:64ae7b1d951482194e3a2f8d20cd3fbdaaa13418ee988445d3c9edb28da50ea2", size = 322580, upload-time = "2025-08-19T20:25:44.668Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/95/e248cabea8c5b1eaa69c0e4742e4d4cbb3708272670917daf8eef2f78aa1/gradio_client-1.12.1-py3-none-any.whl", hash = "sha256:37c0bcd0e6b3794b2b2e0b5039696d6962d8125bdb96960ad1b79412326b1664", size = 324611, upload-time = "2025-08-19T20:25:42.933Z" },
+]
+
+[[package]]
+name = "groovy"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/52/36/bbdede67400277bef33d3ec0e6a31750da972c469f75966b4930c753218f/groovy-0.1.2.tar.gz", hash = "sha256:25c1dc09b3f9d7e292458aa762c6beb96ea037071bf5e917fc81fb78d2231083", size = 17325, upload-time = "2025-02-28T20:24:56.068Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/28/27/3d6dcadc8a3214d8522c1e7f6a19554e33659be44546d44a2f7572ac7d2a/groovy-0.1.2-py3-none-any.whl", hash = "sha256:7f7975bab18c729a257a8b1ae9dcd70b7cafb1720481beae47719af57c35fa64", size = 14090, upload-time = "2025-02-28T20:24:55.152Z" },
+]
+
+[[package]]
+name = "h11"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
+]
+
+[[package]]
+name = "hf-xet"
+version = "1.1.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/23/0f/5b60fc28ee7f8cc17a5114a584fd6b86e11c3e0a6e142a7f97a161e9640a/hf_xet-1.1.9.tar.gz", hash = "sha256:c99073ce404462e909f1d5839b2d14a3827b8fe75ed8aed551ba6609c026c803", size = 484242, upload-time = "2025-08-27T23:05:19.441Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/de/12/56e1abb9a44cdef59a411fe8a8673313195711b5ecce27880eb9c8fa90bd/hf_xet-1.1.9-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:a3b6215f88638dd7a6ff82cb4e738dcbf3d863bf667997c093a3c990337d1160", size = 2762553, upload-time = "2025-08-27T23:05:15.153Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/e6/2d0d16890c5f21b862f5df3146519c182e7f0ae49b4b4bf2bd8a40d0b05e/hf_xet-1.1.9-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9b486de7a64a66f9a172f4b3e0dfe79c9f0a93257c501296a2521a13495a698a", size = 2623216, upload-time = "2025-08-27T23:05:13.778Z" },
+ { url = "https://files.pythonhosted.org/packages/81/42/7e6955cf0621e87491a1fb8cad755d5c2517803cea174229b0ec00ff0166/hf_xet-1.1.9-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4c5a840c2c4e6ec875ed13703a60e3523bc7f48031dfd750923b2a4d1a5fc3c", size = 3186789, upload-time = "2025-08-27T23:05:12.368Z" },
+ { url = "https://files.pythonhosted.org/packages/df/8b/759233bce05457f5f7ec062d63bbfd2d0c740b816279eaaa54be92aa452a/hf_xet-1.1.9-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:96a6139c9e44dad1c52c52520db0fffe948f6bce487cfb9d69c125f254bb3790", size = 3088747, upload-time = "2025-08-27T23:05:10.439Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/3c/28cc4db153a7601a996985bcb564f7b8f5b9e1a706c7537aad4b4809f358/hf_xet-1.1.9-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ad1022e9a998e784c97b2173965d07fe33ee26e4594770b7785a8cc8f922cd95", size = 3251429, upload-time = "2025-08-27T23:05:16.471Z" },
+ { url = "https://files.pythonhosted.org/packages/84/17/7caf27a1d101bfcb05be85850d4aa0a265b2e1acc2d4d52a48026ef1d299/hf_xet-1.1.9-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:86754c2d6d5afb11b0a435e6e18911a4199262fe77553f8c50d75e21242193ea", size = 3354643, upload-time = "2025-08-27T23:05:17.828Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/50/0c39c9eed3411deadcc98749a6699d871b822473f55fe472fad7c01ec588/hf_xet-1.1.9-cp37-abi3-win_amd64.whl", hash = "sha256:5aad3933de6b725d61d51034e04174ed1dce7a57c63d530df0014dea15a40127", size = 2804797, upload-time = "2025-08-27T23:05:20.77Z" },
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.9"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "h11" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
+]
+
+[[package]]
+name = "httpx"
+version = "0.28.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "certifi" },
+ { name = "httpcore" },
+ { name = "idna" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
+]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.34.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "fsspec" },
+ { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" },
+ { name = "packaging" },
+ { name = "pyyaml" },
+ { name = "requests" },
+ { name = "tqdm" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/45/c9/bdbe19339f76d12985bc03572f330a01a93c04dffecaaea3061bdd7fb892/huggingface_hub-0.34.4.tar.gz", hash = "sha256:a4228daa6fb001be3f4f4bdaf9a0db00e1739235702848df00885c9b5742c85c", size = 459768, upload-time = "2025-08-08T09:14:52.365Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/39/7b/bb06b061991107cd8783f300adff3e7b7f284e330fd82f507f2a1417b11d/huggingface_hub-0.34.4-py3-none-any.whl", hash = "sha256:9b365d781739c93ff90c359844221beef048403f1bc1f1c123c191257c3c890a", size = 561452, upload-time = "2025-08-08T09:14:50.159Z" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
+]
+
+[[package]]
+name = "imageio"
+version = "2.34.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy" },
+ { name = "pillow" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/da/bd/d79206fe42f88d9a99ee952420eebc5368bcd062e17b484a21928462d98e/imageio-2.34.2.tar.gz", hash = "sha256:5c0c0ee8faa018a1c42f649b90395dd4d3bb6187c09053a0cd6f1fdd51bbff5e", size = 387689, upload-time = "2024-06-24T08:09:35.672Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3d/84/f1647217231f6cc46883e5d26e870cc3e1520d458ecd52d6df750810d53c/imageio-2.34.2-py3-none-any.whl", hash = "sha256:a0bb27ec9d5bab36a9f4835e51b21d2cb099e1f78451441f94687ff3404b79f8", size = 313455, upload-time = "2024-06-24T08:09:33.12Z" },
+]
+
+[[package]]
+name = "imageio-ffmpeg"
+version = "0.5.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "setuptools" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/73/22/b0a0d96ecdbd4a8493c6cd7914a8b2bfbc39f8660f81c20e3bde847182e0/imageio-ffmpeg-0.5.1.tar.gz", hash = "sha256:0ed7a9b31f560b0c9d929c5291cd430edeb9bed3ce9a497480e536dd4326484c", size = 17704, upload-time = "2024-06-03T15:12:23.284Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/be/3d/df5dc571520f495ba2152215cd26deebd46e1530eae0261f503bfd137e99/imageio_ffmpeg-0.5.1-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1460e84712b9d06910c1f7bb524096b0341d4b7844cea6c20e099d0a24e795b1", size = 22532925, upload-time = "2024-06-03T15:12:06.451Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/44/c9e18a73dfd939b5b0ec843870ac72f1f6c17e31a03149b687a85465bff7/imageio_ffmpeg-0.5.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:5289f75c7f755b499653f3209fea4efd1430cba0e39831c381aad2d458f7a316", size = 26900394, upload-time = "2024-06-03T15:12:16.376Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/ca/8537cdbf1a6852912cb293fa23dc7adf256cec793113485447f0cbf0fe79/imageio_ffmpeg-0.5.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7fa9132a291d5eb28c44553550deb40cbdab831f2a614e55360301a6582eb205", size = 22880461, upload-time = "2024-06-03T15:12:20.744Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/97/ff7de8ace4425fffc6e8d7646017500c9d5435df608b13cc34de4835ad4f/imageio_ffmpeg-0.5.1-py3-none-win32.whl", hash = "sha256:89efe2c79979d8174ba8476deb7f74d74c331caee3fb2b65ba2883bec0737625", size = 19652102, upload-time = "2024-06-03T15:11:59.16Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/1c/1b9c72bf839def47626436ea5ebaf643404f7850482c5fafd71a3deeaa94/imageio_ffmpeg-0.5.1-py3-none-win_amd64.whl", hash = "sha256:1521e79e253bedbdd36a547e0cbd94a025ba0b558e17f08fea687d805a0e4698", size = 22619891, upload-time = "2024-06-03T15:12:11.248Z" },
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "8.7.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "zipp" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" },
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markupsafe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.25.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "jsonschema-specifications" },
+ { name = "referencing" },
+ { name = "rpds-py" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" },
+]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2025.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "referencing" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" },
+]
+
+[[package]]
+name = "markdown-it-py"
+version = "4.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mdurl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
+]
+
+[[package]]
+name = "markupsafe"
+version = "3.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" },
+ { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" },
+ { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" },
+ { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" },
+ { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" },
+ { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" },
+ { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" },
+ { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" },
+ { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" },
+ { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" },
+ { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" },
+ { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" },
+ { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" },
+ { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" },
+ { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" },
+ { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" },
+ { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" },
+ { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" },
+ { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
+]
+
+[[package]]
+name = "moviepy"
+version = "1.0.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "decorator" },
+ { name = "imageio" },
+ { name = "imageio-ffmpeg" },
+ { name = "numpy" },
+ { name = "proglog" },
+ { name = "requests" },
+ { name = "tqdm" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/18/54/01a8c4e35c75ca9724d19a7e4de9dc23f0ceb8769102c7de056113af61c3/moviepy-1.0.3.tar.gz", hash = "sha256:2884e35d1788077db3ff89e763c5ba7bfddbd7ae9108c9bc809e7ba58fa433f5", size = 388311, upload-time = "2020-05-07T16:27:46.856Z" }
+
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" },
+]
+
+[[package]]
+name = "narwhals"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ec/8f/b0a99455f6e5fe2d4e77deeee8b133cfa06e1f5441c77a70defdbbfbf639/narwhals-2.4.0.tar.gz", hash = "sha256:a71931f7fb3c8e082cbe18ef0740644d87d60eba841ddfa9ba9394de1d43062f", size = 556886, upload-time = "2025-09-08T13:17:36.732Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8a/8c/ac6f6bd2d118ac49e1bc0285e401c1dc50cf878d48156bbc7969902703b0/narwhals-2.4.0-py3-none-any.whl", hash = "sha256:06d958b03e3e3725ae16feee6737b4970991bb52e8465ef75f388c574732ac59", size = 406233, upload-time = "2025-09-08T13:17:35.071Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.4.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.11' and sys_platform == 'linux'",
+ "python_full_version < '3.11' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368, upload-time = "2024-10-21T12:39:38.695Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263, upload-time = "2024-10-21T12:39:36.247Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.5"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.13' and sys_platform == 'linux'",
+ "python_full_version == '3.12.*' and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and sys_platform == 'linux'",
+ "python_full_version >= '3.13' and sys_platform != 'linux'",
+ "python_full_version == '3.12.*' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" },
+]
+
+[[package]]
+name = "numpy"
+version = "1.26.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/55/b3/b13bce39ba82b7398c06d10446f5ffd5c07db39b09bd37370dc720c7951c/numpy-1.26.0.tar.gz", hash = "sha256:f93fc78fe8bf15afe2b8d6b6499f1c73953169fad1e9a8dd086cdff3190e7fdf", size = 15633455, upload-time = "2023-09-16T20:12:58.065Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/be/f8/034752c5131c46e10364e4db241974f2eb6bb31bbfc4335344c19e17d909/numpy-1.26.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8db2f125746e44dce707dd44d4f4efeea8d7e2b43aace3f8d1f235cfa2733dd", size = 20617359, upload-time = "2023-09-16T19:58:18.591Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/ff/0e1f31c70495df6a1afbe98fa237f36e6fb7c5443fcb9a53f43170e5814c/numpy-1.26.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0621f7daf973d34d18b4e4bafb210bbaf1ef5e0100b5fa750bd9cde84c7ac292", size = 13953220, upload-time = "2023-09-16T19:58:41.481Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c7/dc05fb56c0536f499d75ef4e201c37facb75e1ad1f416b98a9939f89f6f1/numpy-1.26.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51be5f8c349fdd1a5568e72713a21f518e7d6707bcf8503b528b88d33b57dc68", size = 14167853, upload-time = "2023-09-16T19:59:03.56Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/5a/f265a1ba3641d16b5480a217a6aed08cceef09cd173b568cd5351053472a/numpy-1.26.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:767254ad364991ccfc4d81b8152912e53e103ec192d1bb4ea6b1f5a7117040be", size = 18181958, upload-time = "2023-09-16T19:59:30.999Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/cc/be866f190cfe818e1eb128f887b3cd715cfa554de9d5fe876c5a3ea3af48/numpy-1.26.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:436c8e9a4bdeeee84e3e59614d38c3dbd3235838a877af8c211cfcac8a80b8d3", size = 18025005, upload-time = "2023-09-16T19:59:59.382Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/16/bb4ff6c803f3000c130618f75a879fc335c9f9434d1317033c35876709ca/numpy-1.26.0-cp310-cp310-win32.whl", hash = "sha256:c2e698cb0c6dda9372ea98a0344245ee65bdc1c9dd939cceed6bb91256837896", size = 20745239, upload-time = "2023-09-16T20:00:33.545Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/05/ef9fc04adda45d537619ea956bc33489f50a46badc949c4280d8309185ec/numpy-1.26.0-cp310-cp310-win_amd64.whl", hash = "sha256:09aaee96c2cbdea95de76ecb8a586cb687d281c881f5f17bfc0fb7f5890f6b91", size = 15793269, upload-time = "2023-09-16T20:00:59.079Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/2f/b42860931c1479714201495ffe47d74460a916ae426a21fc9b68c5e329aa/numpy-1.26.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:637c58b468a69869258b8ae26f4a4c6ff8abffd4a8334c830ffb63e0feefe99a", size = 20619338, upload-time = "2023-09-16T20:01:30.608Z" },
+ { url = "https://files.pythonhosted.org/packages/35/21/9e150d654da358beb29fe216f339dc17f2b2ac13fff2a89669401a910550/numpy-1.26.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:306545e234503a24fe9ae95ebf84d25cba1fdc27db971aa2d9f1ab6bba19a9dd", size = 13981953, upload-time = "2023-09-16T20:01:54.921Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/84/baf694be765d68c73f0f8a9d52151c339aed5f2d64205824a6f29021170c/numpy-1.26.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6adc33561bd1d46f81131d5352348350fc23df4d742bb246cdfca606ea1208", size = 14167328, upload-time = "2023-09-16T20:02:20.922Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/36/161e2f8110f8c49e59f6107bd6da4257d30aff9f06373d0471811f73dcc5/numpy-1.26.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e062aa24638bb5018b7841977c360d2f5917268d125c833a686b7cbabbec496c", size = 18178118, upload-time = "2023-09-16T20:02:49.046Z" },
+ { url = "https://files.pythonhosted.org/packages/37/41/63975634a93da2a384d3c8084eba467242cab68daab0cd8f4fd470dcee26/numpy-1.26.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:546b7dd7e22f3c6861463bebb000646fa730e55df5ee4a0224408b5694cc6148", size = 18020808, upload-time = "2023-09-16T20:03:16.849Z" },
+ { url = "https://files.pythonhosted.org/packages/58/d2/cbc329aa908cb963bd849f14e24f59c002a488e9055fab2c68887a6b5f1c/numpy-1.26.0-cp311-cp311-win32.whl", hash = "sha256:c0b45c8b65b79337dee5134d038346d30e109e9e2e9d43464a2970e5c0e93229", size = 20750149, upload-time = "2023-09-16T20:03:49.609Z" },
+ { url = "https://files.pythonhosted.org/packages/93/fd/3f826c6d15d3bdcf65b8031e4835c52b7d9c45add25efa2314b53850e1a2/numpy-1.26.0-cp311-cp311-win_amd64.whl", hash = "sha256:eae430ecf5794cb7ae7fa3808740b015aa80747e5266153128ef055975a72b99", size = 15794407, upload-time = "2023-09-16T20:04:13.829Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/83/f8a62f08d38d831a2980427ffc465a4207fe600124b00cfb0ef8265594a7/numpy-1.26.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:166b36197e9debc4e384e9c652ba60c0bacc216d0fc89e78f973a9760b503388", size = 20325091, upload-time = "2023-09-16T20:04:44.267Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/72/6d1cbdf0d770016bc9485f9ef02e73d5cb4cf3c726f8e120b860a403d307/numpy-1.26.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f042f66d0b4ae6d48e70e28d487376204d3cbf43b84c03bac57e28dac6151581", size = 13672867, upload-time = "2023-09-16T20:05:05.591Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/70/c071b2347e339f572f5aa61f649b70167e5dd218e3da3dc600c9b08154b9/numpy-1.26.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5e18e5b14a7560d8acf1c596688f4dfd19b4f2945b245a71e5af4ddb7422feb", size = 13872627, upload-time = "2023-09-16T20:05:28.488Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/e2/4ecfbc4a2e3f9d227b008c92a5d1f0370190a639b24fec3b226841eaaf19/numpy-1.26.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6bad22a791226d0a5c7c27a80a20e11cfe09ad5ef9084d4d3fc4a299cca505", size = 17883864, upload-time = "2023-09-16T20:05:55.622Z" },
+ { url = "https://files.pythonhosted.org/packages/45/08/025bb65dbe19749f1a67a80655670941982e5d0144a4e588ebbdbcfe7983/numpy-1.26.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4acc65dd65da28060e206c8f27a573455ed724e6179941edb19f97e58161bb69", size = 17721550, upload-time = "2023-09-16T20:06:23.505Z" },
+ { url = "https://files.pythonhosted.org/packages/98/66/f0a846751044d0b6db5156fb6304d0336861ed055c21053a0f447103939c/numpy-1.26.0-cp312-cp312-win32.whl", hash = "sha256:bb0d9a1aaf5f1cb7967320e80690a1d7ff69f1d47ebc5a9bea013e3a21faec95", size = 19951520, upload-time = "2023-09-16T20:06:53.976Z" },
+ { url = "https://files.pythonhosted.org/packages/98/d7/1cc7a11118408ad21a5379ff2a4e0b0e27504c68ef6e808ebaa90ee95902/numpy-1.26.0-cp312-cp312-win_amd64.whl", hash = "sha256:ee84ca3c58fe48b8ddafdeb1db87388dce2c3c3f701bf447b05e4cfcc3679112", size = 15504471, upload-time = "2023-09-16T20:07:22.222Z" },
+]
+
+[[package]]
+name = "nvidia-cublas-cu12"
+version = "12.8.4.1"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" },
+]
+
+[[package]]
+name = "nvidia-cuda-cupti-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" },
+]
+
+[[package]]
+name = "nvidia-cuda-nvrtc-cu12"
+version = "12.8.93"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" },
+]
+
+[[package]]
+name = "nvidia-cuda-runtime-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" },
+]
+
+[[package]]
+name = "nvidia-cudnn-cu12"
+version = "9.10.2.21"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" },
+]
+
+[[package]]
+name = "nvidia-cufft-cu12"
+version = "11.3.3.83"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" },
+]
+
+[[package]]
+name = "nvidia-cufile-cu12"
+version = "1.13.1.3"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" },
+]
+
+[[package]]
+name = "nvidia-curand-cu12"
+version = "10.3.9.90"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" },
+]
+
+[[package]]
+name = "nvidia-cusolver-cu12"
+version = "11.7.3.90"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
+ { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" },
+ { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" },
+]
+
+[[package]]
+name = "nvidia-cusparse-cu12"
+version = "12.5.8.93"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" },
+]
+
+[[package]]
+name = "nvidia-cusparselt-cu12"
+version = "0.7.1"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" },
+]
+
+[[package]]
+name = "nvidia-nccl-cu12"
+version = "2.27.3"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5c/5b/4e4fff7bad39adf89f735f2bc87248c81db71205b62bcc0d5ca5b606b3c3/nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adf27ccf4238253e0b826bce3ff5fa532d65fc42322c8bfdfaf28024c0fbe039", size = 322364134, upload-time = "2025-06-03T21:58:04.013Z" },
+]
+
+[[package]]
+name = "nvidia-nvjitlink-cu12"
+version = "12.8.93"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" },
+]
+
+[[package]]
+name = "nvidia-nvtx-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" },
+]
+
+[[package]]
+name = "omegaconf"
+version = "2.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "antlr4-python3-runtime" },
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = "2022-12-08T20:59:22.753Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" },
+]
+
+[[package]]
+name = "orjson"
+version = "3.11.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/be/4d/8df5f83256a809c22c4d6792ce8d43bb503be0fb7a8e4da9025754b09658/orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a", size = 5482394, upload-time = "2025-08-26T17:46:43.171Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/64/4a3cef001c6cd9c64256348d4c13a7b09b857e3e1cbb5185917df67d8ced/orjson-3.11.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:29cb1f1b008d936803e2da3d7cba726fc47232c45df531b29edf0b232dd737e7", size = 238600, upload-time = "2025-08-26T17:44:36.875Z" },
+ { url = "https://files.pythonhosted.org/packages/10/ce/0c8c87f54f79d051485903dc46226c4d3220b691a151769156054df4562b/orjson-3.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97dceed87ed9139884a55db8722428e27bd8452817fbf1869c58b49fecab1120", size = 123526, upload-time = "2025-08-26T17:44:39.574Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/d0/249497e861f2d438f45b3ab7b7b361484237414945169aa285608f9f7019/orjson-3.11.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:58533f9e8266cb0ac298e259ed7b4d42ed3fa0b78ce76860626164de49e0d467", size = 128075, upload-time = "2025-08-26T17:44:40.672Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/64/00485702f640a0fd56144042a1ea196469f4a3ae93681871564bf74fa996/orjson-3.11.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c212cfdd90512fe722fa9bd620de4d46cda691415be86b2e02243242ae81873", size = 130483, upload-time = "2025-08-26T17:44:41.788Z" },
+ { url = "https://files.pythonhosted.org/packages/64/81/110d68dba3909171bf3f05619ad0cf187b430e64045ae4e0aa7ccfe25b15/orjson-3.11.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff835b5d3e67d9207343effb03760c00335f8b5285bfceefd4dc967b0e48f6a", size = 132539, upload-time = "2025-08-26T17:44:43.12Z" },
+ { url = "https://files.pythonhosted.org/packages/79/92/dba25c22b0ddfafa1e6516a780a00abac28d49f49e7202eb433a53c3e94e/orjson-3.11.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5aa4682912a450c2db89cbd92d356fef47e115dffba07992555542f344d301b", size = 135390, upload-time = "2025-08-26T17:44:44.199Z" },
+ { url = "https://files.pythonhosted.org/packages/44/1d/ca2230fd55edbd87b58a43a19032d63a4b180389a97520cc62c535b726f9/orjson-3.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d18dd34ea2e860553a579df02041845dee0af8985dff7f8661306f95504ddf", size = 132966, upload-time = "2025-08-26T17:44:45.719Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/b9/96bbc8ed3e47e52b487d504bd6861798977445fbc410da6e87e302dc632d/orjson-3.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d8b11701bc43be92ea42bd454910437b355dfb63696c06fe953ffb40b5f763b4", size = 131349, upload-time = "2025-08-26T17:44:46.862Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/3c/418fbd93d94b0df71cddf96b7fe5894d64a5d890b453ac365120daec30f7/orjson-3.11.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:90368277087d4af32d38bd55f9da2ff466d25325bf6167c8f382d8ee40cb2bbc", size = 404087, upload-time = "2025-08-26T17:44:48.079Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/a9/2bfd58817d736c2f63608dec0c34857339d423eeed30099b126562822191/orjson-3.11.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fd7ff459fb393358d3a155d25b275c60b07a2c83dcd7ea962b1923f5a1134569", size = 146067, upload-time = "2025-08-26T17:44:49.302Z" },
+ { url = "https://files.pythonhosted.org/packages/33/ba/29023771f334096f564e48d82ed855a0ed3320389d6748a9c949e25be734/orjson-3.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f8d902867b699bcd09c176a280b1acdab57f924489033e53d0afe79817da37e6", size = 135506, upload-time = "2025-08-26T17:44:50.558Z" },
+ { url = "https://files.pythonhosted.org/packages/39/62/b5a1eca83f54cb3aa11a9645b8a22f08d97dbd13f27f83aae7c6666a0a05/orjson-3.11.3-cp310-cp310-win32.whl", hash = "sha256:bb93562146120bb51e6b154962d3dadc678ed0fce96513fa6bc06599bb6f6edc", size = 136352, upload-time = "2025-08-26T17:44:51.698Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/c0/7ebfaa327d9a9ed982adc0d9420dbce9a3fec45b60ab32c6308f731333fa/orjson-3.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:976c6f1975032cc327161c65d4194c549f2589d88b105a5e3499429a54479770", size = 131539, upload-time = "2025-08-26T17:44:52.974Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/8b/360674cd817faef32e49276187922a946468579fcaf37afdfb6c07046e92/orjson-3.11.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d2ae0cc6aeb669633e0124531f342a17d8e97ea999e42f12a5ad4adaa304c5f", size = 238238, upload-time = "2025-08-26T17:44:54.214Z" },
+ { url = "https://files.pythonhosted.org/packages/05/3d/5fa9ea4b34c1a13be7d9046ba98d06e6feb1d8853718992954ab59d16625/orjson-3.11.3-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:ba21dbb2493e9c653eaffdc38819b004b7b1b246fb77bfc93dc016fe664eac91", size = 127713, upload-time = "2025-08-26T17:44:55.596Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/5f/e18367823925e00b1feec867ff5f040055892fc474bf5f7875649ecfa586/orjson-3.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f1a271e56d511d1569937c0447d7dce5a99a33ea0dec76673706360a051904", size = 123241, upload-time = "2025-08-26T17:44:57.185Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/bd/3c66b91c4564759cf9f473251ac1650e446c7ba92a7c0f9f56ed54f9f0e6/orjson-3.11.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b67e71e47caa6680d1b6f075a396d04fa6ca8ca09aafb428731da9b3ea32a5a6", size = 127895, upload-time = "2025-08-26T17:44:58.349Z" },
+ { url = "https://files.pythonhosted.org/packages/82/b5/dc8dcd609db4766e2967a85f63296c59d4722b39503e5b0bf7fd340d387f/orjson-3.11.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7d012ebddffcce8c85734a6d9e5f08180cd3857c5f5a3ac70185b43775d043d", size = 130303, upload-time = "2025-08-26T17:44:59.491Z" },
+ { url = "https://files.pythonhosted.org/packages/48/c2/d58ec5fd1270b2aa44c862171891adc2e1241bd7dab26c8f46eb97c6c6f1/orjson-3.11.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd759f75d6b8d1b62012b7f5ef9461d03c804f94d539a5515b454ba3a6588038", size = 132366, upload-time = "2025-08-26T17:45:00.654Z" },
+ { url = "https://files.pythonhosted.org/packages/73/87/0ef7e22eb8dd1ef940bfe3b9e441db519e692d62ed1aae365406a16d23d0/orjson-3.11.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6890ace0809627b0dff19cfad92d69d0fa3f089d3e359a2a532507bb6ba34efb", size = 135180, upload-time = "2025-08-26T17:45:02.424Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/6a/e5bf7b70883f374710ad74faf99bacfc4b5b5a7797c1d5e130350e0e28a3/orjson-3.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d4a5e041ae435b815e568537755773d05dac031fee6a57b4ba70897a44d9d2", size = 132741, upload-time = "2025-08-26T17:45:03.663Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/0c/4577fd860b6386ffaa56440e792af01c7882b56d2766f55384b5b0e9d39b/orjson-3.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d68bf97a771836687107abfca089743885fb664b90138d8761cce61d5625d55", size = 131104, upload-time = "2025-08-26T17:45:04.939Z" },
+ { url = "https://files.pythonhosted.org/packages/66/4b/83e92b2d67e86d1c33f2ea9411742a714a26de63641b082bdbf3d8e481af/orjson-3.11.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:bfc27516ec46f4520b18ef645864cee168d2a027dbf32c5537cb1f3e3c22dac1", size = 403887, upload-time = "2025-08-26T17:45:06.228Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/e5/9eea6a14e9b5ceb4a271a1fd2e1dec5f2f686755c0fab6673dc6ff3433f4/orjson-3.11.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f66b001332a017d7945e177e282a40b6997056394e3ed7ddb41fb1813b83e824", size = 145855, upload-time = "2025-08-26T17:45:08.338Z" },
+ { url = "https://files.pythonhosted.org/packages/45/78/8d4f5ad0c80ba9bf8ac4d0fc71f93a7d0dc0844989e645e2074af376c307/orjson-3.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:212e67806525d2561efbfe9e799633b17eb668b8964abed6b5319b2f1cfbae1f", size = 135361, upload-time = "2025-08-26T17:45:09.625Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/5f/16386970370178d7a9b438517ea3d704efcf163d286422bae3b37b88dbb5/orjson-3.11.3-cp311-cp311-win32.whl", hash = "sha256:6e8e0c3b85575a32f2ffa59de455f85ce002b8bdc0662d6b9c2ed6d80ab5d204", size = 136190, upload-time = "2025-08-26T17:45:10.962Z" },
+ { url = "https://files.pythonhosted.org/packages/09/60/db16c6f7a41dd8ac9fb651f66701ff2aeb499ad9ebc15853a26c7c152448/orjson-3.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:6be2f1b5d3dc99a5ce5ce162fc741c22ba9f3443d3dd586e6a1211b7bc87bc7b", size = 131389, upload-time = "2025-08-26T17:45:12.285Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/2a/bb811ad336667041dea9b8565c7c9faf2f59b47eb5ab680315eea612ef2e/orjson-3.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:fafb1a99d740523d964b15c8db4eabbfc86ff29f84898262bf6e3e4c9e97e43e", size = 126120, upload-time = "2025-08-26T17:45:13.515Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/b0/a7edab2a00cdcb2688e1c943401cb3236323e7bfd2839815c6131a3742f4/orjson-3.11.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8c752089db84333e36d754c4baf19c0e1437012242048439c7e80eb0e6426e3b", size = 238259, upload-time = "2025-08-26T17:45:15.093Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/c6/ff4865a9cc398a07a83342713b5932e4dc3cb4bf4bc04e8f83dedfc0d736/orjson-3.11.3-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9b8761b6cf04a856eb544acdd82fc594b978f12ac3602d6374a7edb9d86fd2c2", size = 127633, upload-time = "2025-08-26T17:45:16.417Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/e6/e00bea2d9472f44fe8794f523e548ce0ad51eb9693cf538a753a27b8bda4/orjson-3.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b13974dc8ac6ba22feaa867fc19135a3e01a134b4f7c9c28162fed4d615008a", size = 123061, upload-time = "2025-08-26T17:45:17.673Z" },
+ { url = "https://files.pythonhosted.org/packages/54/31/9fbb78b8e1eb3ac605467cb846e1c08d0588506028b37f4ee21f978a51d4/orjson-3.11.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f83abab5bacb76d9c821fd5c07728ff224ed0e52d7a71b7b3de822f3df04e15c", size = 127956, upload-time = "2025-08-26T17:45:19.172Z" },
+ { url = "https://files.pythonhosted.org/packages/36/88/b0604c22af1eed9f98d709a96302006915cfd724a7ebd27d6dd11c22d80b/orjson-3.11.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6fbaf48a744b94091a56c62897b27c31ee2da93d826aa5b207131a1e13d4064", size = 130790, upload-time = "2025-08-26T17:45:20.586Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/9d/1c1238ae9fffbfed51ba1e507731b3faaf6b846126a47e9649222b0fd06f/orjson-3.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc779b4f4bba2847d0d2940081a7b6f7b5877e05408ffbb74fa1faf4a136c424", size = 132385, upload-time = "2025-08-26T17:45:22.036Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/b5/c06f1b090a1c875f337e21dd71943bc9d84087f7cdf8c6e9086902c34e42/orjson-3.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd4b909ce4c50faa2192da6bb684d9848d4510b736b0611b6ab4020ea6fd2d23", size = 135305, upload-time = "2025-08-26T17:45:23.4Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/26/5f028c7d81ad2ebbf84414ba6d6c9cac03f22f5cd0d01eb40fb2d6a06b07/orjson-3.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524b765ad888dc5518bbce12c77c2e83dee1ed6b0992c1790cc5fb49bb4b6667", size = 132875, upload-time = "2025-08-26T17:45:25.182Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/d4/b8df70d9cfb56e385bf39b4e915298f9ae6c61454c8154a0f5fd7efcd42e/orjson-3.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:84fd82870b97ae3cdcea9d8746e592b6d40e1e4d4527835fc520c588d2ded04f", size = 130940, upload-time = "2025-08-26T17:45:27.209Z" },
+ { url = "https://files.pythonhosted.org/packages/da/5e/afe6a052ebc1a4741c792dd96e9f65bf3939d2094e8b356503b68d48f9f5/orjson-3.11.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fbecb9709111be913ae6879b07bafd4b0785b44c1eb5cac8ac76da048b3885a1", size = 403852, upload-time = "2025-08-26T17:45:28.478Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/90/7bbabafeb2ce65915e9247f14a56b29c9334003536009ef5b122783fe67e/orjson-3.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9dba358d55aee552bd868de348f4736ca5a4086d9a62e2bfbbeeb5629fe8b0cc", size = 146293, upload-time = "2025-08-26T17:45:29.86Z" },
+ { url = "https://files.pythonhosted.org/packages/27/b3/2d703946447da8b093350570644a663df69448c9d9330e5f1d9cce997f20/orjson-3.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eabcf2e84f1d7105f84580e03012270c7e97ecb1fb1618bda395061b2a84a049", size = 135470, upload-time = "2025-08-26T17:45:31.243Z" },
+ { url = "https://files.pythonhosted.org/packages/38/70/b14dcfae7aff0e379b0119c8a812f8396678919c431efccc8e8a0263e4d9/orjson-3.11.3-cp312-cp312-win32.whl", hash = "sha256:3782d2c60b8116772aea8d9b7905221437fdf53e7277282e8d8b07c220f96cca", size = 136248, upload-time = "2025-08-26T17:45:32.567Z" },
+ { url = "https://files.pythonhosted.org/packages/35/b8/9e3127d65de7fff243f7f3e53f59a531bf6bb295ebe5db024c2503cc0726/orjson-3.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:79b44319268af2eaa3e315b92298de9a0067ade6e6003ddaef72f8e0bedb94f1", size = 131437, upload-time = "2025-08-26T17:45:34.949Z" },
+ { url = "https://files.pythonhosted.org/packages/51/92/a946e737d4d8a7fd84a606aba96220043dcc7d6988b9e7551f7f6d5ba5ad/orjson-3.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:0e92a4e83341ef79d835ca21b8bd13e27c859e4e9e4d7b63defc6e58462a3710", size = 125978, upload-time = "2025-08-26T17:45:36.422Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/79/8932b27293ad35919571f77cb3693b5906cf14f206ef17546052a241fdf6/orjson-3.11.3-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:af40c6612fd2a4b00de648aa26d18186cd1322330bd3a3cc52f87c699e995810", size = 238127, upload-time = "2025-08-26T17:45:38.146Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/82/cb93cd8cf132cd7643b30b6c5a56a26c4e780c7a145db6f83de977b540ce/orjson-3.11.3-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:9f1587f26c235894c09e8b5b7636a38091a9e6e7fe4531937534749c04face43", size = 127494, upload-time = "2025-08-26T17:45:39.57Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/b8/2d9eb181a9b6bb71463a78882bcac1027fd29cf62c38a40cc02fc11d3495/orjson-3.11.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61dcdad16da5bb486d7227a37a2e789c429397793a6955227cedbd7252eb5a27", size = 123017, upload-time = "2025-08-26T17:45:40.876Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/14/a0e971e72d03b509190232356d54c0f34507a05050bd026b8db2bf2c192c/orjson-3.11.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11c6d71478e2cbea0a709e8a06365fa63da81da6498a53e4c4f065881d21ae8f", size = 127898, upload-time = "2025-08-26T17:45:42.188Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/af/dc74536722b03d65e17042cc30ae586161093e5b1f29bccda24765a6ae47/orjson-3.11.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff94112e0098470b665cb0ed06efb187154b63649403b8d5e9aedeb482b4548c", size = 130742, upload-time = "2025-08-26T17:45:43.511Z" },
+ { url = "https://files.pythonhosted.org/packages/62/e6/7a3b63b6677bce089fe939353cda24a7679825c43a24e49f757805fc0d8a/orjson-3.11.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae8b756575aaa2a855a75192f356bbda11a89169830e1439cfb1a3e1a6dde7be", size = 132377, upload-time = "2025-08-26T17:45:45.525Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/cd/ce2ab93e2e7eaf518f0fd15e3068b8c43216c8a44ed82ac2b79ce5cef72d/orjson-3.11.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9416cc19a349c167ef76135b2fe40d03cea93680428efee8771f3e9fb66079d", size = 135313, upload-time = "2025-08-26T17:45:46.821Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/b4/f98355eff0bd1a38454209bbc73372ce351ba29933cb3e2eba16c04b9448/orjson-3.11.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b822caf5b9752bc6f246eb08124c3d12bf2175b66ab74bac2ef3bbf9221ce1b2", size = 132908, upload-time = "2025-08-26T17:45:48.126Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/92/8f5182d7bc2a1bed46ed960b61a39af8389f0ad476120cd99e67182bfb6d/orjson-3.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:414f71e3bdd5573893bf5ecdf35c32b213ed20aa15536fe2f588f946c318824f", size = 130905, upload-time = "2025-08-26T17:45:49.414Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/60/c41ca753ce9ffe3d0f67b9b4c093bdd6e5fdb1bc53064f992f66bb99954d/orjson-3.11.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:828e3149ad8815dc14468f36ab2a4b819237c155ee1370341b91ea4c8672d2ee", size = 403812, upload-time = "2025-08-26T17:45:51.085Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/13/e4a4f16d71ce1868860db59092e78782c67082a8f1dc06a3788aef2b41bc/orjson-3.11.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac9e05f25627ffc714c21f8dfe3a579445a5c392a9c8ae7ba1d0e9fb5333f56e", size = 146277, upload-time = "2025-08-26T17:45:52.851Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/8b/bafb7f0afef9344754a3a0597a12442f1b85a048b82108ef2c956f53babd/orjson-3.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e44fbe4000bd321d9f3b648ae46e0196d21577cf66ae684a96ff90b1f7c93633", size = 135418, upload-time = "2025-08-26T17:45:54.806Z" },
+ { url = "https://files.pythonhosted.org/packages/60/d4/bae8e4f26afb2c23bea69d2f6d566132584d1c3a5fe89ee8c17b718cab67/orjson-3.11.3-cp313-cp313-win32.whl", hash = "sha256:2039b7847ba3eec1f5886e75e6763a16e18c68a63efc4b029ddf994821e2e66b", size = 136216, upload-time = "2025-08-26T17:45:57.182Z" },
+ { url = "https://files.pythonhosted.org/packages/88/76/224985d9f127e121c8cad882cea55f0ebe39f97925de040b75ccd4b33999/orjson-3.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:29be5ac4164aa8bdcba5fa0700a3c9c316b411d8ed9d39ef8a882541bd452fae", size = 131362, upload-time = "2025-08-26T17:45:58.56Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/cf/0dce7a0be94bd36d1346be5067ed65ded6adb795fdbe3abd234c8d576d01/orjson-3.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:18bd1435cb1f2857ceb59cfb7de6f92593ef7b831ccd1b9bfb28ca530e539dce", size = 125989, upload-time = "2025-08-26T17:45:59.95Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/77/d3b1fef1fc6aaeed4cbf3be2b480114035f4df8fa1a99d2dac1d40d6e924/orjson-3.11.3-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cf4b81227ec86935568c7edd78352a92e97af8da7bd70bdfdaa0d2e0011a1ab4", size = 238115, upload-time = "2025-08-26T17:46:01.669Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/6d/468d21d49bb12f900052edcfbf52c292022d0a323d7828dc6376e6319703/orjson-3.11.3-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:bc8bc85b81b6ac9fc4dae393a8c159b817f4c2c9dee5d12b773bddb3b95fc07e", size = 127493, upload-time = "2025-08-26T17:46:03.466Z" },
+ { url = "https://files.pythonhosted.org/packages/67/46/1e2588700d354aacdf9e12cc2d98131fb8ac6f31ca65997bef3863edb8ff/orjson-3.11.3-cp314-cp314-manylinux_2_34_aarch64.whl", hash = "sha256:88dcfc514cfd1b0de038443c7b3e6a9797ffb1b3674ef1fd14f701a13397f82d", size = 122998, upload-time = "2025-08-26T17:46:04.803Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/94/11137c9b6adb3779f1b34fd98be51608a14b430dbc02c6d41134fbba484c/orjson-3.11.3-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d61cd543d69715d5fc0a690c7c6f8dcc307bc23abef9738957981885f5f38229", size = 132915, upload-time = "2025-08-26T17:46:06.237Z" },
+ { url = "https://files.pythonhosted.org/packages/10/61/dccedcf9e9bcaac09fdabe9eaee0311ca92115699500efbd31950d878833/orjson-3.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2b7b153ed90ababadbef5c3eb39549f9476890d339cf47af563aea7e07db2451", size = 130907, upload-time = "2025-08-26T17:46:07.581Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/fd/0e935539aa7b08b3ca0f817d73034f7eb506792aae5ecc3b7c6e679cdf5f/orjson-3.11.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7909ae2460f5f494fecbcd10613beafe40381fd0316e35d6acb5f3a05bfda167", size = 403852, upload-time = "2025-08-26T17:46:08.982Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/2b/50ae1a5505cd1043379132fdb2adb8a05f37b3e1ebffe94a5073321966fd/orjson-3.11.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:2030c01cbf77bc67bee7eef1e7e31ecf28649353987775e3583062c752da0077", size = 146309, upload-time = "2025-08-26T17:46:10.576Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/1d/a473c158e380ef6f32753b5f39a69028b25ec5be331c2049a2201bde2e19/orjson-3.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a0169ebd1cbd94b26c7a7ad282cf5c2744fce054133f959e02eb5265deae1872", size = 135424, upload-time = "2025-08-26T17:46:12.386Z" },
+ { url = "https://files.pythonhosted.org/packages/da/09/17d9d2b60592890ff7382e591aa1d9afb202a266b180c3d4049b1ec70e4a/orjson-3.11.3-cp314-cp314-win32.whl", hash = "sha256:0c6d7328c200c349e3a4c6d8c83e0a5ad029bdc2d417f234152bf34842d0fc8d", size = 136266, upload-time = "2025-08-26T17:46:13.853Z" },
+ { url = "https://files.pythonhosted.org/packages/15/58/358f6846410a6b4958b74734727e582ed971e13d335d6c7ce3e47730493e/orjson-3.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:317bbe2c069bbc757b1a2e4105b64aacd3bc78279b66a6b9e51e846e4809f804", size = 131351, upload-time = "2025-08-26T17:46:15.27Z" },
+ { url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985, upload-time = "2025-08-26T17:46:16.67Z" },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
+]
+
+[[package]]
+name = "pandas"
+version = "2.3.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy" },
+ { name = "python-dateutil" },
+ { name = "pytz" },
+ { name = "tzdata" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/79/8e/0e90233ac205ad182bd6b422532695d2b9414944a280488105d598c70023/pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb", size = 4488684, upload-time = "2025-08-21T10:28:29.257Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2e/16/a8eeb70aad84ccbf14076793f90e0031eded63c1899aeae9fdfbf37881f4/pandas-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52bc29a946304c360561974c6542d1dd628ddafa69134a7131fdfd6a5d7a1a35", size = 11539648, upload-time = "2025-08-21T10:26:36.236Z" },
+ { url = "https://files.pythonhosted.org/packages/47/f1/c5bdaea13bf3708554d93e948b7ea74121ce6e0d59537ca4c4f77731072b/pandas-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:220cc5c35ffaa764dd5bb17cf42df283b5cb7fdf49e10a7b053a06c9cb48ee2b", size = 10786923, upload-time = "2025-08-21T10:26:40.518Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/10/811fa01476d29ffed692e735825516ad0e56d925961819e6126b4ba32147/pandas-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c05e15111221384019897df20c6fe893b2f697d03c811ee67ec9e0bb5a3424", size = 11726241, upload-time = "2025-08-21T10:26:43.175Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/6a/40b043b06e08df1ea1b6d20f0e0c2f2c4ec8c4f07d1c92948273d943a50b/pandas-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc03acc273c5515ab69f898df99d9d4f12c4d70dbfc24c3acc6203751d0804cf", size = 12349533, upload-time = "2025-08-21T10:26:46.611Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/ea/2e081a2302e41a9bca7056659fdd2b85ef94923723e41665b42d65afd347/pandas-2.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d25c20a03e8870f6339bcf67281b946bd20b86f1a544ebbebb87e66a8d642cba", size = 13202407, upload-time = "2025-08-21T10:26:49.068Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/12/7ff9f6a79e2ee8869dcf70741ef998b97ea20050fe25f83dc759764c1e32/pandas-2.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21bb612d148bb5860b7eb2c10faacf1a810799245afd342cf297d7551513fbb6", size = 13837212, upload-time = "2025-08-21T10:26:51.832Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/df/5ab92fcd76455a632b3db34a746e1074d432c0cdbbd28d7cd1daba46a75d/pandas-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:b62d586eb25cb8cb70a5746a378fc3194cb7f11ea77170d59f889f5dfe3cec7a", size = 11338099, upload-time = "2025-08-21T10:26:54.382Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/59/f3e010879f118c2d400902d2d871c2226cef29b08c09fb8dc41111730400/pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743", size = 11563308, upload-time = "2025-08-21T10:26:56.656Z" },
+ { url = "https://files.pythonhosted.org/packages/38/18/48f10f1cc5c397af59571d638d211f494dba481f449c19adbd282aa8f4ca/pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4", size = 10820319, upload-time = "2025-08-21T10:26:59.162Z" },
+ { url = "https://files.pythonhosted.org/packages/95/3b/1e9b69632898b048e223834cd9702052bcf06b15e1ae716eda3196fb972e/pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2", size = 11790097, upload-time = "2025-08-21T10:27:02.204Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/ef/0e2ffb30b1f7fbc9a588bd01e3c14a0d96854d09a887e15e30cc19961227/pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e", size = 12397958, upload-time = "2025-08-21T10:27:05.409Z" },
+ { url = "https://files.pythonhosted.org/packages/23/82/e6b85f0d92e9afb0e7f705a51d1399b79c7380c19687bfbf3d2837743249/pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea", size = 13225600, upload-time = "2025-08-21T10:27:07.791Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/f1/f682015893d9ed51611948bd83683670842286a8edd4f68c2c1c3b231eef/pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372", size = 13879433, upload-time = "2025-08-21T10:27:10.347Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/e7/ae86261695b6c8a36d6a4c8d5f9b9ede8248510d689a2f379a18354b37d7/pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f", size = 11336557, upload-time = "2025-08-21T10:27:12.983Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/db/614c20fb7a85a14828edd23f1c02db58a30abf3ce76f38806155d160313c/pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9", size = 11587652, upload-time = "2025-08-21T10:27:15.888Z" },
+ { url = "https://files.pythonhosted.org/packages/99/b0/756e52f6582cade5e746f19bad0517ff27ba9c73404607c0306585c201b3/pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b", size = 10717686, upload-time = "2025-08-21T10:27:18.486Z" },
+ { url = "https://files.pythonhosted.org/packages/37/4c/dd5ccc1e357abfeee8353123282de17997f90ff67855f86154e5a13b81e5/pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175", size = 11278722, upload-time = "2025-08-21T10:27:21.149Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/a4/f7edcfa47e0a88cda0be8b068a5bae710bf264f867edfdf7b71584ace362/pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9", size = 11987803, upload-time = "2025-08-21T10:27:23.767Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/61/1bce4129f93ab66f1c68b7ed1c12bac6a70b1b56c5dab359c6bbcd480b52/pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4", size = 12766345, upload-time = "2025-08-21T10:27:26.6Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/46/80d53de70fee835531da3a1dae827a1e76e77a43ad22a8cd0f8142b61587/pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811", size = 13439314, upload-time = "2025-08-21T10:27:29.213Z" },
+ { url = "https://files.pythonhosted.org/packages/28/30/8114832daff7489f179971dbc1d854109b7f4365a546e3ea75b6516cea95/pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae", size = 10983326, upload-time = "2025-08-21T10:27:31.901Z" },
+ { url = "https://files.pythonhosted.org/packages/27/64/a2f7bf678af502e16b472527735d168b22b7824e45a4d7e96a4fbb634b59/pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e", size = 11531061, upload-time = "2025-08-21T10:27:34.647Z" },
+ { url = "https://files.pythonhosted.org/packages/54/4c/c3d21b2b7769ef2f4c2b9299fcadd601efa6729f1357a8dbce8dd949ed70/pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9", size = 10668666, upload-time = "2025-08-21T10:27:37.203Z" },
+ { url = "https://files.pythonhosted.org/packages/50/e2/f775ba76ecfb3424d7f5862620841cf0edb592e9abd2d2a5387d305fe7a8/pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a", size = 11332835, upload-time = "2025-08-21T10:27:40.188Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/52/0634adaace9be2d8cac9ef78f05c47f3a675882e068438b9d7ec7ef0c13f/pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b", size = 12057211, upload-time = "2025-08-21T10:27:43.117Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/9d/2df913f14b2deb9c748975fdb2491da1a78773debb25abbc7cbc67c6b549/pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6", size = 12749277, upload-time = "2025-08-21T10:27:45.474Z" },
+ { url = "https://files.pythonhosted.org/packages/87/af/da1a2417026bd14d98c236dba88e39837182459d29dcfcea510b2ac9e8a1/pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a", size = 13415256, upload-time = "2025-08-21T10:27:49.885Z" },
+ { url = "https://files.pythonhosted.org/packages/22/3c/f2af1ce8840ef648584a6156489636b5692c162771918aa95707c165ad2b/pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b", size = 10982579, upload-time = "2025-08-21T10:28:08.435Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/98/8df69c4097a6719e357dc249bf437b8efbde808038268e584421696cbddf/pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57", size = 12028163, upload-time = "2025-08-21T10:27:52.232Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/23/f95cbcbea319f349e10ff90db488b905c6883f03cbabd34f6b03cbc3c044/pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2", size = 11391860, upload-time = "2025-08-21T10:27:54.673Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/1b/6a984e98c4abee22058aa75bfb8eb90dce58cf8d7296f8bc56c14bc330b0/pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9", size = 11309830, upload-time = "2025-08-21T10:27:56.957Z" },
+ { url = "https://files.pythonhosted.org/packages/15/d5/f0486090eb18dd8710bf60afeaf638ba6817047c0c8ae5c6a25598665609/pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2", size = 11883216, upload-time = "2025-08-21T10:27:59.302Z" },
+ { url = "https://files.pythonhosted.org/packages/10/86/692050c119696da19e20245bbd650d8dfca6ceb577da027c3a73c62a047e/pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012", size = 12699743, upload-time = "2025-08-21T10:28:02.447Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/d7/612123674d7b17cf345aad0a10289b2a384bff404e0463a83c4a3a59d205/pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370", size = 13186141, upload-time = "2025-08-21T10:28:05.377Z" },
+]
+
+[[package]]
+name = "pillow"
+version = "9.5.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/00/d5/4903f310765e0ff2b8e91ffe55031ac6af77d982f0156061e20a4d1a8b2d/Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1", size = 50488147, upload-time = "2023-04-01T09:31:37.159Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1b/bc/cff591742feea45f88a3b8a83f7cab4a1dcdb4bcdfc51a06d92f96c81165/Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16", size = 3395758, upload-time = "2023-04-01T09:28:03.251Z" },
+ { url = "https://files.pythonhosted.org/packages/38/06/de304914ecd2c911939a28579546bd4d9b6ae0b3c07ce5fe9bd7d100eb34/Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa", size = 3077111, upload-time = "2023-04-01T09:28:07.916Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/57/7864b6a22acb5f1d4b70af8c92cbd5e3af25f4d5869c24cd8074ca1f3593/Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38", size = 3112529, upload-time = "2023-04-01T09:28:10.564Z" },
+ { url = "https://files.pythonhosted.org/packages/62/88/46a35f690ee4f8b08aef5fdb47f63d29c34f6874834155e52bf4456d9566/Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062", size = 3386670, upload-time = "2023-04-01T09:28:13.539Z" },
+ { url = "https://files.pythonhosted.org/packages/59/1d/26a56ed1deae695a8c7d13fb514284ba8b9fd62bab9ebe6d6b474523b8b0/Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e", size = 3308572, upload-time = "2023-04-01T09:28:16.585Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/36/d22b0fac821a14572fdb9a8015b2bf19ee81eaa560ea25a6772760c86a30/Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5", size = 3163999, upload-time = "2023-04-01T09:28:19.777Z" },
+ { url = "https://files.pythonhosted.org/packages/25/6b/d3c35d207c9c0b6c2f855420f62e64ef43d348e8c797ad1c32b9f2106a19/Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d", size = 3415623, upload-time = "2023-04-01T09:28:23.176Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/6a/a7df39c502caeadd942d8bf97bc2fdfc819fbdc7499a2ab05e7db43611ac/Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903", size = 3350658, upload-time = "2023-04-01T09:28:26.277Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/ad/d29c8c48498da680521665b8483beb78a9343269bbd0730970e9396b01f0/Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a", size = 3414574, upload-time = "2023-04-01T09:28:30.143Z" },
+ { url = "https://files.pythonhosted.org/packages/93/54/9d7f01fd3fe4069c88827728646e3c8f1aff0995e8422d841b38f034f39a/Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44", size = 2211916, upload-time = "2023-04-01T09:28:33.723Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/14/0030e542f2acfea43635e55584c114e6cfd94d342393a5f71f74c172dc35/Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb", size = 2511474, upload-time = "2023-04-01T09:28:35.846Z" },
+ { url = "https://files.pythonhosted.org/packages/78/a8/3c2d737d856eb9cd8c18e78f6fe0ed08a2805bded74cbb0455584859023b/Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32", size = 3395792, upload-time = "2023-04-01T09:28:38.917Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/15/310cde63cb15a091de889ded26281924cf9cfa5c000b36b06bd0c7f50261/Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c", size = 3077092, upload-time = "2023-04-01T09:28:41.28Z" },
+ { url = "https://files.pythonhosted.org/packages/17/66/20db69c0361902a2f6ee2086d3e83c70133e3fb4cb31470e59a8ed37184e/Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3", size = 3112543, upload-time = "2023-04-01T09:28:43.89Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/a8/ff526cdec6b56eb20c992e7083f02c8065049ed1e62fbc159390d7a3dd5e/Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a", size = 3386654, upload-time = "2023-04-01T09:28:46.378Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/70/e9a45a2e9c58c23e023fcda5af9686f5b42c718cc9bc86194e0025cf0ec5/Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1", size = 3308566, upload-time = "2023-04-01T09:28:49.521Z" },
+ { url = "https://files.pythonhosted.org/packages/61/a5/ee306d6cc53c9a30c23ba2313b43b67fdf76c611ca5afd0cdd62922cbd3e/Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99", size = 3164027, upload-time = "2023-04-01T09:28:52.295Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/59/e6bd2c3715ace343d9739276ceed79657fe116923238d102cf731ab463dd/Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625", size = 3415610, upload-time = "2023-04-01T09:28:54.667Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/6d/9beb596ba5a5e61081c843187bcdbb42a5c9a9ef552751b554894247da7a/Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579", size = 3350704, upload-time = "2023-04-01T09:28:57.098Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/e4/de633d85be3b3c770c554a37a89e8273069bd19c34b15a419c2795600310/Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296", size = 3414604, upload-time = "2023-04-01T09:29:03.375Z" },
+ { url = "https://files.pythonhosted.org/packages/46/a0/e410f655300932308e70e883dd60c0c51e6f74bed138641ea9193e64fd7c/Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec", size = 2211929, upload-time = "2023-04-01T09:29:06.338Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/02/7729c8aecbc525b560c7eb283ffa34c6f5a6d0ed6d1339570c65a3e63088/Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4", size = 2511551, upload-time = "2023-04-01T09:29:08.636Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/8b/d38cc68796be4ac238db327682a1acfbc5deccf64a150aa44ee1efbaafae/Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089", size = 2489206, upload-time = "2023-04-01T20:01:51.312Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/38/b7bcbab3bfe1946ba9cf71c1fa03e541b498069457be49eadcdc229412ef/Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb", size = 2211914, upload-time = "2023-04-01T09:29:10.935Z" },
+ { url = "https://files.pythonhosted.org/packages/29/8a/f4cf3f32bc554f9260b645ea1151449ac13525796d3d1a42076d75945d8d/Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b", size = 2511483, upload-time = "2023-04-01T09:29:13.217Z" },
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" },
+]
+
+[[package]]
+name = "proglog"
+version = "0.1.12"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "tqdm" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c2/af/c108866c452eda1132f3d6b3cb6be2ae8430c97e9309f38ca9dbd430af37/proglog-0.1.12.tar.gz", hash = "sha256:361ee074721c277b89b75c061336cb8c5f287c92b043efa562ccf7866cda931c", size = 8794, upload-time = "2025-05-09T14:36:18.316Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c1/1b/f7ea6cde25621cd9236541c66ff018f4268012a534ec31032bcb187dc5e7/proglog-0.1.12-py3-none-any.whl", hash = "sha256:ccaafce51e80a81c65dc907a460c07ccb8ec1f78dc660cfd8f9ec3a22f01b84c", size = 6337, upload-time = "2025-05-09T14:36:16.798Z" },
+]
+
+[[package]]
+name = "protobuf"
+version = "6.32.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/c0/df/fb4a8eeea482eca989b51cffd274aac2ee24e825f0bf3cbce5281fa1567b/protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2", size = 440614, upload-time = "2025-08-14T21:21:25.015Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/33/18/df8c87da2e47f4f1dcc5153a81cd6bca4e429803f4069a299e236e4dd510/protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741", size = 424409, upload-time = "2025-08-14T21:21:12.366Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/59/0a820b7310f8139bd8d5a9388e6a38e1786d179d6f33998448609296c229/protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e", size = 435735, upload-time = "2025-08-14T21:21:15.046Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/5b/0d421533c59c789e9c9894683efac582c06246bf24bb26b753b149bd88e4/protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0", size = 426449, upload-time = "2025-08-14T21:21:16.687Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/7b/607764ebe6c7a23dcee06e054fd1de3d5841b7648a90fd6def9a3bb58c5e/protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1", size = 322869, upload-time = "2025-08-14T21:21:18.282Z" },
+ { url = "https://files.pythonhosted.org/packages/40/01/2e730bd1c25392fc32e3268e02446f0d77cb51a2c3a8486b1798e34d5805/protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c", size = 322009, upload-time = "2025-08-14T21:21:19.893Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/f2/80ffc4677aac1bc3519b26bc7f7f5de7fce0ee2f7e36e59e27d8beb32dd1/protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783", size = 169287, upload-time = "2025-08-14T21:21:23.515Z" },
+]
+
+[[package]]
+name = "psutil"
+version = "5.9.8"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", size = 503247, upload-time = "2024-01-19T20:47:09.517Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", size = 248702, upload-time = "2024-01-19T20:47:36.303Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", size = 285242, upload-time = "2024-01-19T20:47:39.65Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", size = 288191, upload-time = "2024-01-19T20:47:43.078Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/f5/2aa3a4acdc1e5940b59d421742356f133185667dd190b166dbcfcf5d7b43/psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", size = 251252, upload-time = "2024-01-19T20:47:52.88Z" },
+ { url = "https://files.pythonhosted.org/packages/93/52/3e39d26feae7df0aa0fd510b14012c3678b36ed068f7d78b8d8784d61f0e/psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", size = 255090, upload-time = "2024-01-19T20:47:56.019Z" },
+ { url = "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8", size = 249898, upload-time = "2024-01-19T20:47:59.238Z" },
+]
+
+[[package]]
+name = "pyarrow"
+version = "21.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ef/c2/ea068b8f00905c06329a3dfcd40d0fcc2b7d0f2e355bdb25b65e0a0e4cd4/pyarrow-21.0.0.tar.gz", hash = "sha256:5051f2dccf0e283ff56335760cbc8622cf52264d67e359d5569541ac11b6d5bc", size = 1133487, upload-time = "2025-07-18T00:57:31.761Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/17/d9/110de31880016e2afc52d8580b397dbe47615defbf09ca8cf55f56c62165/pyarrow-21.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e563271e2c5ff4d4a4cbeb2c83d5cf0d4938b891518e676025f7268c6fe5fe26", size = 31196837, upload-time = "2025-07-18T00:54:34.755Z" },
+ { url = "https://files.pythonhosted.org/packages/df/5f/c1c1997613abf24fceb087e79432d24c19bc6f7259cab57c2c8e5e545fab/pyarrow-21.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fee33b0ca46f4c85443d6c450357101e47d53e6c3f008d658c27a2d020d44c79", size = 32659470, upload-time = "2025-07-18T00:54:38.329Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/ed/b1589a777816ee33ba123ba1e4f8f02243a844fed0deec97bde9fb21a5cf/pyarrow-21.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7be45519b830f7c24b21d630a31d48bcebfd5d4d7f9d3bdb49da9cdf6d764edb", size = 41055619, upload-time = "2025-07-18T00:54:42.172Z" },
+ { url = "https://files.pythonhosted.org/packages/44/28/b6672962639e85dc0ac36f71ab3a8f5f38e01b51343d7aa372a6b56fa3f3/pyarrow-21.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:26bfd95f6bff443ceae63c65dc7e048670b7e98bc892210acba7e4995d3d4b51", size = 42733488, upload-time = "2025-07-18T00:54:47.132Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/cc/de02c3614874b9089c94eac093f90ca5dfa6d5afe45de3ba847fd950fdf1/pyarrow-21.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bd04ec08f7f8bd113c55868bd3fc442a9db67c27af098c5f814a3091e71cc61a", size = 43329159, upload-time = "2025-07-18T00:54:51.686Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/3e/99473332ac40278f196e105ce30b79ab8affab12f6194802f2593d6b0be2/pyarrow-21.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9b0b14b49ac10654332a805aedfc0147fb3469cbf8ea951b3d040dab12372594", size = 45050567, upload-time = "2025-07-18T00:54:56.679Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/f5/c372ef60593d713e8bfbb7e0c743501605f0ad00719146dc075faf11172b/pyarrow-21.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:9d9f8bcb4c3be7738add259738abdeddc363de1b80e3310e04067aa1ca596634", size = 26217959, upload-time = "2025-07-18T00:55:00.482Z" },
+ { url = "https://files.pythonhosted.org/packages/94/dc/80564a3071a57c20b7c32575e4a0120e8a330ef487c319b122942d665960/pyarrow-21.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c077f48aab61738c237802836fc3844f85409a46015635198761b0d6a688f87b", size = 31243234, upload-time = "2025-07-18T00:55:03.812Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/cc/3b51cb2db26fe535d14f74cab4c79b191ed9a8cd4cbba45e2379b5ca2746/pyarrow-21.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:689f448066781856237eca8d1975b98cace19b8dd2ab6145bf49475478bcaa10", size = 32714370, upload-time = "2025-07-18T00:55:07.495Z" },
+ { url = "https://files.pythonhosted.org/packages/24/11/a4431f36d5ad7d83b87146f515c063e4d07ef0b7240876ddb885e6b44f2e/pyarrow-21.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:479ee41399fcddc46159a551705b89c05f11e8b8cb8e968f7fec64f62d91985e", size = 41135424, upload-time = "2025-07-18T00:55:11.461Z" },
+ { url = "https://files.pythonhosted.org/packages/74/dc/035d54638fc5d2971cbf1e987ccd45f1091c83bcf747281cf6cc25e72c88/pyarrow-21.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:40ebfcb54a4f11bcde86bc586cbd0272bac0d516cfa539c799c2453768477569", size = 42823810, upload-time = "2025-07-18T00:55:16.301Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/3b/89fced102448a9e3e0d4dded1f37fa3ce4700f02cdb8665457fcc8015f5b/pyarrow-21.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8d58d8497814274d3d20214fbb24abcad2f7e351474357d552a8d53bce70c70e", size = 43391538, upload-time = "2025-07-18T00:55:23.82Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/bb/ea7f1bd08978d39debd3b23611c293f64a642557e8141c80635d501e6d53/pyarrow-21.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:585e7224f21124dd57836b1530ac8f2df2afc43c861d7bf3d58a4870c42ae36c", size = 45120056, upload-time = "2025-07-18T00:55:28.231Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/0b/77ea0600009842b30ceebc3337639a7380cd946061b620ac1a2f3cb541e2/pyarrow-21.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:555ca6935b2cbca2c0e932bedd853e9bc523098c39636de9ad4693b5b1df86d6", size = 26220568, upload-time = "2025-07-18T00:55:32.122Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/d4/d4f817b21aacc30195cf6a46ba041dd1be827efa4a623cc8bf39a1c2a0c0/pyarrow-21.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3a302f0e0963db37e0a24a70c56cf91a4faa0bca51c23812279ca2e23481fccd", size = 31160305, upload-time = "2025-07-18T00:55:35.373Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/9c/dcd38ce6e4b4d9a19e1d36914cb8e2b1da4e6003dd075474c4cfcdfe0601/pyarrow-21.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:b6b27cf01e243871390474a211a7922bfbe3bda21e39bc9160daf0da3fe48876", size = 32684264, upload-time = "2025-07-18T00:55:39.303Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/74/2a2d9f8d7a59b639523454bec12dba35ae3d0a07d8ab529dc0809f74b23c/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:e72a8ec6b868e258a2cd2672d91f2860ad532d590ce94cdf7d5e7ec674ccf03d", size = 41108099, upload-time = "2025-07-18T00:55:42.889Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/90/2660332eeb31303c13b653ea566a9918484b6e4d6b9d2d46879a33ab0622/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b7ae0bbdc8c6674259b25bef5d2a1d6af5d39d7200c819cf99e07f7dfef1c51e", size = 42829529, upload-time = "2025-07-18T00:55:47.069Z" },
+ { url = "https://files.pythonhosted.org/packages/33/27/1a93a25c92717f6aa0fca06eb4700860577d016cd3ae51aad0e0488ac899/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:58c30a1729f82d201627c173d91bd431db88ea74dcaa3885855bc6203e433b82", size = 43367883, upload-time = "2025-07-18T00:55:53.069Z" },
+ { url = "https://files.pythonhosted.org/packages/05/d9/4d09d919f35d599bc05c6950095e358c3e15148ead26292dfca1fb659b0c/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:072116f65604b822a7f22945a7a6e581cfa28e3454fdcc6939d4ff6090126623", size = 45133802, upload-time = "2025-07-18T00:55:57.714Z" },
+ { url = "https://files.pythonhosted.org/packages/71/30/f3795b6e192c3ab881325ffe172e526499eb3780e306a15103a2764916a2/pyarrow-21.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:cf56ec8b0a5c8c9d7021d6fd754e688104f9ebebf1bf4449613c9531f5346a18", size = 26203175, upload-time = "2025-07-18T00:56:01.364Z" },
+ { url = "https://files.pythonhosted.org/packages/16/ca/c7eaa8e62db8fb37ce942b1ea0c6d7abfe3786ca193957afa25e71b81b66/pyarrow-21.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e99310a4ebd4479bcd1964dff9e14af33746300cb014aa4a3781738ac63baf4a", size = 31154306, upload-time = "2025-07-18T00:56:04.42Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/e8/e87d9e3b2489302b3a1aea709aaca4b781c5252fcb812a17ab6275a9a484/pyarrow-21.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:d2fe8e7f3ce329a71b7ddd7498b3cfac0eeb200c2789bd840234f0dc271a8efe", size = 32680622, upload-time = "2025-07-18T00:56:07.505Z" },
+ { url = "https://files.pythonhosted.org/packages/84/52/79095d73a742aa0aba370c7942b1b655f598069489ab387fe47261a849e1/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f522e5709379d72fb3da7785aa489ff0bb87448a9dc5a75f45763a795a089ebd", size = 41104094, upload-time = "2025-07-18T00:56:10.994Z" },
+ { url = "https://files.pythonhosted.org/packages/89/4b/7782438b551dbb0468892a276b8c789b8bbdb25ea5c5eb27faadd753e037/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:69cbbdf0631396e9925e048cfa5bce4e8c3d3b41562bbd70c685a8eb53a91e61", size = 42825576, upload-time = "2025-07-18T00:56:15.569Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/62/0f29de6e0a1e33518dec92c65be0351d32d7ca351e51ec5f4f837a9aab91/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:731c7022587006b755d0bdb27626a1a3bb004bb56b11fb30d98b6c1b4718579d", size = 43368342, upload-time = "2025-07-18T00:56:19.531Z" },
+ { url = "https://files.pythonhosted.org/packages/90/c7/0fa1f3f29cf75f339768cc698c8ad4ddd2481c1742e9741459911c9ac477/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dc56bc708f2d8ac71bd1dcb927e458c93cec10b98eb4120206a4091db7b67b99", size = 45131218, upload-time = "2025-07-18T00:56:23.347Z" },
+ { url = "https://files.pythonhosted.org/packages/01/63/581f2076465e67b23bc5a37d4a2abff8362d389d29d8105832e82c9c811c/pyarrow-21.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:186aa00bca62139f75b7de8420f745f2af12941595bbbfa7ed3870ff63e25636", size = 26087551, upload-time = "2025-07-18T00:56:26.758Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/ab/357d0d9648bb8241ee7348e564f2479d206ebe6e1c47ac5027c2e31ecd39/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:a7a102574faa3f421141a64c10216e078df467ab9576684d5cd696952546e2da", size = 31290064, upload-time = "2025-07-18T00:56:30.214Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/8a/5685d62a990e4cac2043fc76b4661bf38d06efed55cf45a334b455bd2759/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:1e005378c4a2c6db3ada3ad4c217b381f6c886f0a80d6a316fe586b90f77efd7", size = 32727837, upload-time = "2025-07-18T00:56:33.935Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/de/c0828ee09525c2bafefd3e736a248ebe764d07d0fd762d4f0929dbc516c9/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:65f8e85f79031449ec8706b74504a316805217b35b6099155dd7e227eef0d4b6", size = 41014158, upload-time = "2025-07-18T00:56:37.528Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/26/a2865c420c50b7a3748320b614f3484bfcde8347b2639b2b903b21ce6a72/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:3a81486adc665c7eb1a2bde0224cfca6ceaba344a82a971ef059678417880eb8", size = 42667885, upload-time = "2025-07-18T00:56:41.483Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/f9/4ee798dc902533159250fb4321267730bc0a107d8c6889e07c3add4fe3a5/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fc0d2f88b81dcf3ccf9a6ae17f89183762c8a94a5bdcfa09e05cfe413acf0503", size = 43276625, upload-time = "2025-07-18T00:56:48.002Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/da/e02544d6997037a4b0d22d8e5f66bc9315c3671371a8b18c79ade1cefe14/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6299449adf89df38537837487a4f8d3bd91ec94354fdd2a7d30bc11c48ef6e79", size = 44951890, upload-time = "2025-07-18T00:56:52.568Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/4e/519c1bc1876625fe6b71e9a28287c43ec2f20f73c658b9ae1d485c0c206e/pyarrow-21.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:222c39e2c70113543982c6b34f3077962b44fca38c0bd9e68bb6781534425c10", size = 26371006, upload-time = "2025-07-18T00:56:56.379Z" },
+]
+
+[[package]]
+name = "pydantic"
+version = "2.11.7"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "annotated-types" },
+ { name = "pydantic-core" },
+ { name = "typing-extensions" },
+ { name = "typing-inspection" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.33.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" },
+ { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" },
+ { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" },
+ { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" },
+ { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" },
+ { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" },
+ { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" },
+ { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" },
+ { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
+ { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
+ { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
+ { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
+ { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
+ { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
+ { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
+ { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
+ { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
+ { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
+ { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" },
+ { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" },
+ { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" },
+ { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" },
+ { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" },
+ { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" },
+ { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" },
+]
+
+[[package]]
+name = "pydeck"
+version = "0.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jinja2" },
+ { name = "numpy" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/40e14e196864a0f61a92abb14d09b3d3da98f94ccb03b49cf51688140dab/pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605", size = 3832240, upload-time = "2024-05-10T15:36:21.153Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038", size = 6900403, upload-time = "2024-05-10T15:36:17.36Z" },
+]
+
+[[package]]
+name = "pydub"
+version = "0.25.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fe/9a/e6bca0eed82db26562c73b5076539a4a08d3cffd19c3cc5913a3e61145fd/pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f", size = 38326, upload-time = "2021-03-10T02:09:54.659Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a6/53/d78dc063216e62fc55f6b2eebb447f6a4b0a59f55c8406376f76bf959b08/pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6", size = 32327, upload-time = "2021-03-10T02:09:53.503Z" },
+]
+
+[[package]]
+name = "pygments"
+version = "2.19.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
+]
+
+[[package]]
+name = "python-multipart"
+version = "0.0.20"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
+]
+
+[[package]]
+name = "pytz"
+version = "2025.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" },
+ { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" },
+ { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" },
+ { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" },
+ { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" },
+ { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" },
+ { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" },
+ { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" },
+ { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" },
+ { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" },
+ { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" },
+ { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" },
+ { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
+]
+
+[[package]]
+name = "referencing"
+version = "0.36.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "rpds-py" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" },
+]
+
+[[package]]
+name = "regex"
+version = "2025.9.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/5a/4c63457fbcaf19d138d72b2e9b39405954f98c0349b31c601bfcb151582c/regex-2025.9.1.tar.gz", hash = "sha256:88ac07b38d20b54d79e704e38aa3bd2c0f8027432164226bdee201a1c0c9c9ff", size = 400852, upload-time = "2025-09-01T22:10:10.479Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/46/c1/ed9ef923156105a78aa004f9390e5dd87eadc29f5ca8840f172cadb638de/regex-2025.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5aa2a6a73bf218515484b36a0d20c6ad9dc63f6339ff6224147b0e2c095ee55", size = 484813, upload-time = "2025-09-01T22:07:45.528Z" },
+ { url = "https://files.pythonhosted.org/packages/05/de/97957618a774c67f892609eee2fafe3e30703fbbba66de5e6b79d7196dbc/regex-2025.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c2ff5c01d5e47ad5fc9d31bcd61e78c2fa0068ed00cab86b7320214446da766", size = 288981, upload-time = "2025-09-01T22:07:48.464Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/b0/441afadd0a6ffccbd58a9663e5bdd182daa237893e5f8ceec6ff9df4418a/regex-2025.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d49dc84e796b666181de8a9973284cad6616335f01b52bf099643253094920fc", size = 286608, upload-time = "2025-09-01T22:07:50.484Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/cf/d89aecaf17e999ab11a3ef73fc9ab8b64f4e156f121250ef84340b35338d/regex-2025.9.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9914fe1040874f83c15fcea86d94ea54091b0666eab330aaab69e30d106aabe", size = 780459, upload-time = "2025-09-01T22:07:52.34Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/05/05884594a9975a29597917bbdd6837f7b97e8ac23faf22d628aa781e58f7/regex-2025.9.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e71bceb3947362ec5eabd2ca0870bb78eae4edfc60c6c21495133c01b6cd2df4", size = 849276, upload-time = "2025-09-01T22:07:54.591Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/8d/2b3067506838d02096bf107beb129b2ce328cdf776d6474b7f542c0a7bfd/regex-2025.9.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:67a74456f410fe5e869239ee7a5423510fe5121549af133809d9591a8075893f", size = 897320, upload-time = "2025-09-01T22:07:56.129Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/b3/0f9f7766e980b900df0ba9901b52871a2e4203698fb35cdebd219240d5f7/regex-2025.9.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5c3b96ed0223b32dbdc53a83149b6de7ca3acd5acd9c8e64b42a166228abe29c", size = 789931, upload-time = "2025-09-01T22:07:57.834Z" },
+ { url = "https://files.pythonhosted.org/packages/47/9f/7b2f29c8f8b698eb44be5fc68e8b9c8d32e99635eac5defc98de114e9f35/regex-2025.9.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:113d5aa950f428faf46fd77d452df62ebb4cc6531cb619f6cc30a369d326bfbd", size = 780764, upload-time = "2025-09-01T22:07:59.413Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/ac/56176caa86155c14462531eb0a4ddc450d17ba8875001122b3b7c0cb01bf/regex-2025.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fcdeb38de4f7f3d69d798f4f371189061446792a84e7c92b50054c87aae9c07c", size = 773610, upload-time = "2025-09-01T22:08:01.042Z" },
+ { url = "https://files.pythonhosted.org/packages/39/e8/9d6b9bd43998268a9de2f35602077519cacc9cb149f7381758cf8f502ba7/regex-2025.9.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4bcdff370509164b67a6c8ec23c9fb40797b72a014766fdc159bb809bd74f7d8", size = 844090, upload-time = "2025-09-01T22:08:02.94Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/92/d89743b089005cae4cb81cc2fe177e180b7452e60f29de53af34349640f8/regex-2025.9.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:7383efdf6e8e8c61d85e00cfb2e2e18da1a621b8bfb4b0f1c2747db57b942b8f", size = 834775, upload-time = "2025-09-01T22:08:04.781Z" },
+ { url = "https://files.pythonhosted.org/packages/01/8f/86a3e0aaa89295d2a3445bb238e56369963ef6b02a5b4aa3362f4e687413/regex-2025.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ec2bd3bdf0f73f7e9f48dca550ba7d973692d5e5e9a90ac42cc5f16c4432d8b", size = 778521, upload-time = "2025-09-01T22:08:06.596Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/df/72072acb370ee8577c255717f8a58264f1d0de40aa3c9e6ebd5271cac633/regex-2025.9.1-cp310-cp310-win32.whl", hash = "sha256:9627e887116c4e9c0986d5c3b4f52bcfe3df09850b704f62ec3cbf177a0ae374", size = 264105, upload-time = "2025-09-01T22:08:08.708Z" },
+ { url = "https://files.pythonhosted.org/packages/97/73/fb82faaf0375aeaa1bb675008246c79b6779fa5688585a35327610ea0e2e/regex-2025.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:94533e32dc0065eca43912ee6649c90ea0681d59f56d43c45b5bcda9a740b3dd", size = 276131, upload-time = "2025-09-01T22:08:10.156Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/3a/77d7718a2493e54725494f44da1a1e55704743dc4b8fabe5b0596f7b8014/regex-2025.9.1-cp310-cp310-win_arm64.whl", hash = "sha256:a874a61bb580d48642ffd338570ee24ab13fa023779190513fcacad104a6e251", size = 268462, upload-time = "2025-09-01T22:08:11.651Z" },
+ { url = "https://files.pythonhosted.org/packages/06/4d/f741543c0c59f96c6625bc6c11fea1da2e378b7d293ffff6f318edc0ce14/regex-2025.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e5bcf112b09bfd3646e4db6bf2e598534a17d502b0c01ea6550ba4eca780c5e6", size = 484811, upload-time = "2025-09-01T22:08:12.834Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/bd/27e73e92635b6fbd51afc26a414a3133243c662949cd1cda677fe7bb09bd/regex-2025.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:67a0295a3c31d675a9ee0238d20238ff10a9a2fdb7a1323c798fc7029578b15c", size = 288977, upload-time = "2025-09-01T22:08:14.499Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/7d/7dc0c6efc8bc93cd6e9b947581f5fde8a5dbaa0af7c4ec818c5729fdc807/regex-2025.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea8267fbadc7d4bd7c1301a50e85c2ff0de293ff9452a1a9f8d82c6cafe38179", size = 286606, upload-time = "2025-09-01T22:08:15.881Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/01/9b5c6dd394f97c8f2c12f6e8f96879c9ac27292a718903faf2e27a0c09f6/regex-2025.9.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6aeff21de7214d15e928fb5ce757f9495214367ba62875100d4c18d293750cc1", size = 792436, upload-time = "2025-09-01T22:08:17.38Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/24/b7430cfc6ee34bbb3db6ff933beb5e7692e5cc81e8f6f4da63d353566fb0/regex-2025.9.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d89f1bbbbbc0885e1c230f7770d5e98f4f00b0ee85688c871d10df8b184a6323", size = 858705, upload-time = "2025-09-01T22:08:19.037Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/98/155f914b4ea6ae012663188545c4f5216c11926d09b817127639d618b003/regex-2025.9.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ca3affe8ddea498ba9d294ab05f5f2d3b5ad5d515bc0d4a9016dd592a03afe52", size = 905881, upload-time = "2025-09-01T22:08:20.377Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/a7/a470e7bc8259c40429afb6d6a517b40c03f2f3e455c44a01abc483a1c512/regex-2025.9.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:91892a7a9f0a980e4c2c85dd19bc14de2b219a3a8867c4b5664b9f972dcc0c78", size = 798968, upload-time = "2025-09-01T22:08:22.081Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/fa/33f6fec4d41449fea5f62fdf5e46d668a1c046730a7f4ed9f478331a8e3a/regex-2025.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e1cb40406f4ae862710615f9f636c1e030fd6e6abe0e0f65f6a695a2721440c6", size = 781884, upload-time = "2025-09-01T22:08:23.832Z" },
+ { url = "https://files.pythonhosted.org/packages/42/de/2b45f36ab20da14eedddf5009d370625bc5942d9953fa7e5037a32d66843/regex-2025.9.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:94f6cff6f7e2149c7e6499a6ecd4695379eeda8ccbccb9726e8149f2fe382e92", size = 852935, upload-time = "2025-09-01T22:08:25.536Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/f9/878f4fc92c87e125e27aed0f8ee0d1eced9b541f404b048f66f79914475a/regex-2025.9.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6c0226fb322b82709e78c49cc33484206647f8a39954d7e9de1567f5399becd0", size = 844340, upload-time = "2025-09-01T22:08:27.141Z" },
+ { url = "https://files.pythonhosted.org/packages/90/c2/5b6f2bce6ece5f8427c718c085eca0de4bbb4db59f54db77aa6557aef3e9/regex-2025.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a12f59c7c380b4fcf7516e9cbb126f95b7a9518902bcf4a852423ff1dcd03e6a", size = 787238, upload-time = "2025-09-01T22:08:28.75Z" },
+ { url = "https://files.pythonhosted.org/packages/47/66/1ef1081c831c5b611f6f55f6302166cfa1bc9574017410ba5595353f846a/regex-2025.9.1-cp311-cp311-win32.whl", hash = "sha256:49865e78d147a7a4f143064488da5d549be6bfc3f2579e5044cac61f5c92edd4", size = 264118, upload-time = "2025-09-01T22:08:30.388Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/e0/8adc550d7169df1d6b9be8ff6019cda5291054a0107760c2f30788b6195f/regex-2025.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:d34b901f6f2f02ef60f4ad3855d3a02378c65b094efc4b80388a3aeb700a5de7", size = 276151, upload-time = "2025-09-01T22:08:32.073Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/bd/46fef29341396d955066e55384fb93b0be7d64693842bf4a9a398db6e555/regex-2025.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:47d7c2dab7e0b95b95fd580087b6ae196039d62306a592fa4e162e49004b6299", size = 268460, upload-time = "2025-09-01T22:08:33.281Z" },
+ { url = "https://files.pythonhosted.org/packages/39/ef/a0372febc5a1d44c1be75f35d7e5aff40c659ecde864d7fa10e138f75e74/regex-2025.9.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:84a25164bd8dcfa9f11c53f561ae9766e506e580b70279d05a7946510bdd6f6a", size = 486317, upload-time = "2025-09-01T22:08:34.529Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/25/d64543fb7eb41a1024786d518cc57faf1ce64aa6e9ddba097675a0c2f1d2/regex-2025.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:645e88a73861c64c1af558dd12294fb4e67b5c1eae0096a60d7d8a2143a611c7", size = 289698, upload-time = "2025-09-01T22:08:36.162Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/dc/fbf31fc60be317bd9f6f87daa40a8a9669b3b392aa8fe4313df0a39d0722/regex-2025.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:10a450cba5cd5409526ee1d4449f42aad38dd83ac6948cbd6d7f71ca7018f7db", size = 287242, upload-time = "2025-09-01T22:08:37.794Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/74/f933a607a538f785da5021acf5323961b4620972e2c2f1f39b6af4b71db7/regex-2025.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9dc5991592933a4192c166eeb67b29d9234f9c86344481173d1bc52f73a7104", size = 797441, upload-time = "2025-09-01T22:08:39.108Z" },
+ { url = "https://files.pythonhosted.org/packages/89/d0/71fc49b4f20e31e97f199348b8c4d6e613e7b6a54a90eb1b090c2b8496d7/regex-2025.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a32291add816961aab472f4fad344c92871a2ee33c6c219b6598e98c1f0108f2", size = 862654, upload-time = "2025-09-01T22:08:40.586Z" },
+ { url = "https://files.pythonhosted.org/packages/59/05/984edce1411a5685ba9abbe10d42cdd9450aab4a022271f9585539788150/regex-2025.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:588c161a68a383478e27442a678e3b197b13c5ba51dbba40c1ccb8c4c7bee9e9", size = 910862, upload-time = "2025-09-01T22:08:42.416Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/02/5c891bb5fe0691cc1bad336e3a94b9097fbcf9707ec8ddc1dce9f0397289/regex-2025.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47829ffaf652f30d579534da9085fe30c171fa2a6744a93d52ef7195dc38218b", size = 801991, upload-time = "2025-09-01T22:08:44.072Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/ae/fd10d6ad179910f7a1b3e0a7fde1ef8bb65e738e8ac4fd6ecff3f52252e4/regex-2025.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e978e5a35b293ea43f140c92a3269b6ab13fe0a2bf8a881f7ac740f5a6ade85", size = 786651, upload-time = "2025-09-01T22:08:46.079Z" },
+ { url = "https://files.pythonhosted.org/packages/30/cf/9d686b07bbc5bf94c879cc168db92542d6bc9fb67088d03479fef09ba9d3/regex-2025.9.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4cf09903e72411f4bf3ac1eddd624ecfd423f14b2e4bf1c8b547b72f248b7bf7", size = 856556, upload-time = "2025-09-01T22:08:48.376Z" },
+ { url = "https://files.pythonhosted.org/packages/91/9d/302f8a29bb8a49528abbab2d357a793e2a59b645c54deae0050f8474785b/regex-2025.9.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d016b0f77be63e49613c9e26aaf4a242f196cd3d7a4f15898f5f0ab55c9b24d2", size = 849001, upload-time = "2025-09-01T22:08:50.067Z" },
+ { url = "https://files.pythonhosted.org/packages/93/fa/b4c6dbdedc85ef4caec54c817cd5f4418dbfa2453214119f2538082bf666/regex-2025.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:656563e620de6908cd1c9d4f7b9e0777e3341ca7db9d4383bcaa44709c90281e", size = 788138, upload-time = "2025-09-01T22:08:51.933Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/1b/91ee17a3cbf87f81e8c110399279d0e57f33405468f6e70809100f2ff7d8/regex-2025.9.1-cp312-cp312-win32.whl", hash = "sha256:df33f4ef07b68f7ab637b1dbd70accbf42ef0021c201660656601e8a9835de45", size = 264524, upload-time = "2025-09-01T22:08:53.75Z" },
+ { url = "https://files.pythonhosted.org/packages/92/28/6ba31cce05b0f1ec6b787921903f83bd0acf8efde55219435572af83c350/regex-2025.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:5aba22dfbc60cda7c0853516104724dc904caa2db55f2c3e6e984eb858d3edf3", size = 275489, upload-time = "2025-09-01T22:08:55.037Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/ed/ea49f324db00196e9ef7fe00dd13c6164d5173dd0f1bbe495e61bb1fb09d/regex-2025.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:ec1efb4c25e1849c2685fa95da44bfde1b28c62d356f9c8d861d4dad89ed56e9", size = 268589, upload-time = "2025-09-01T22:08:56.369Z" },
+ { url = "https://files.pythonhosted.org/packages/98/25/b2959ce90c6138c5142fe5264ee1f9b71a0c502ca4c7959302a749407c79/regex-2025.9.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bc6834727d1b98d710a63e6c823edf6ffbf5792eba35d3fa119531349d4142ef", size = 485932, upload-time = "2025-09-01T22:08:57.913Z" },
+ { url = "https://files.pythonhosted.org/packages/49/2e/6507a2a85f3f2be6643438b7bd976e67ad73223692d6988eb1ff444106d3/regex-2025.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c3dc05b6d579875719bccc5f3037b4dc80433d64e94681a0061845bd8863c025", size = 289568, upload-time = "2025-09-01T22:08:59.258Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/d8/de4a4b57215d99868f1640e062a7907e185ec7476b4b689e2345487c1ff4/regex-2025.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22213527df4c985ec4a729b055a8306272d41d2f45908d7bacb79be0fa7a75ad", size = 286984, upload-time = "2025-09-01T22:09:00.835Z" },
+ { url = "https://files.pythonhosted.org/packages/03/15/e8cb403403a57ed316e80661db0e54d7aa2efcd85cb6156f33cc18746922/regex-2025.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8e3f6e3c5a5a1adc3f7ea1b5aec89abfc2f4fbfba55dafb4343cd1d084f715b2", size = 797514, upload-time = "2025-09-01T22:09:02.538Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/26/2446f2b9585fed61faaa7e2bbce3aca7dd8df6554c32addee4c4caecf24a/regex-2025.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bcb89c02a0d6c2bec9b0bb2d8c78782699afe8434493bfa6b4021cc51503f249", size = 862586, upload-time = "2025-09-01T22:09:04.322Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/b8/82ffbe9c0992c31bbe6ae1c4b4e21269a5df2559102b90543c9b56724c3c/regex-2025.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b0e2f95413eb0c651cd1516a670036315b91b71767af83bc8525350d4375ccba", size = 910815, upload-time = "2025-09-01T22:09:05.978Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/d8/7303ea38911759c1ee30cc5bc623ee85d3196b733c51fd6703c34290a8d9/regex-2025.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a41dc039e1c97d3c2ed3e26523f748e58c4de3ea7a31f95e1cf9ff973fff5a", size = 802042, upload-time = "2025-09-01T22:09:07.865Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/0e/6ad51a55ed4b5af512bb3299a05d33309bda1c1d1e1808fa869a0bed31bc/regex-2025.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f0b4258b161094f66857a26ee938d3fe7b8a5063861e44571215c44fbf0e5df", size = 786764, upload-time = "2025-09-01T22:09:09.362Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/d5/394e3ffae6baa5a9217bbd14d96e0e5da47bb069d0dbb8278e2681a2b938/regex-2025.9.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bf70e18ac390e6977ea7e56f921768002cb0fa359c4199606c7219854ae332e0", size = 856557, upload-time = "2025-09-01T22:09:11.129Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/80/b288d3910c41194ad081b9fb4b371b76b0bbfdce93e7709fc98df27b37dc/regex-2025.9.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b84036511e1d2bb0a4ff1aec26951caa2dea8772b223c9e8a19ed8885b32dbac", size = 849108, upload-time = "2025-09-01T22:09:12.877Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/cd/5ec76bf626d0d5abdc277b7a1734696f5f3d14fbb4a3e2540665bc305d85/regex-2025.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c2e05dcdfe224047f2a59e70408274c325d019aad96227ab959403ba7d58d2d7", size = 788201, upload-time = "2025-09-01T22:09:14.561Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/36/674672f3fdead107565a2499f3007788b878188acec6d42bc141c5366c2c/regex-2025.9.1-cp313-cp313-win32.whl", hash = "sha256:3b9a62107a7441b81ca98261808fed30ae36ba06c8b7ee435308806bd53c1ed8", size = 264508, upload-time = "2025-09-01T22:09:16.193Z" },
+ { url = "https://files.pythonhosted.org/packages/83/ad/931134539515eb64ce36c24457a98b83c1b2e2d45adf3254b94df3735a76/regex-2025.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:b38afecc10c177eb34cfae68d669d5161880849ba70c05cbfbe409f08cc939d7", size = 275469, upload-time = "2025-09-01T22:09:17.462Z" },
+ { url = "https://files.pythonhosted.org/packages/24/8c/96d34e61c0e4e9248836bf86d69cb224fd222f270fa9045b24e218b65604/regex-2025.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:ec329890ad5e7ed9fc292858554d28d58d56bf62cf964faf0aa57964b21155a0", size = 268586, upload-time = "2025-09-01T22:09:18.948Z" },
+ { url = "https://files.pythonhosted.org/packages/21/b1/453cbea5323b049181ec6344a803777914074b9726c9c5dc76749966d12d/regex-2025.9.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:72fb7a016467d364546f22b5ae86c45680a4e0de6b2a6f67441d22172ff641f1", size = 486111, upload-time = "2025-09-01T22:09:20.734Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/0e/92577f197bd2f7652c5e2857f399936c1876978474ecc5b068c6d8a79c86/regex-2025.9.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c9527fa74eba53f98ad86be2ba003b3ebe97e94b6eb2b916b31b5f055622ef03", size = 289520, upload-time = "2025-09-01T22:09:22.249Z" },
+ { url = "https://files.pythonhosted.org/packages/af/c6/b472398116cca7ea5a6c4d5ccd0fc543f7fd2492cb0c48d2852a11972f73/regex-2025.9.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c905d925d194c83a63f92422af7544ec188301451b292c8b487f0543726107ca", size = 287215, upload-time = "2025-09-01T22:09:23.657Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/11/f12ecb0cf9ca792a32bb92f758589a84149017467a544f2f6bfb45c0356d/regex-2025.9.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74df7c74a63adcad314426b1f4ea6054a5ab25d05b0244f0c07ff9ce640fa597", size = 797855, upload-time = "2025-09-01T22:09:25.197Z" },
+ { url = "https://files.pythonhosted.org/packages/46/88/bbb848f719a540fb5997e71310f16f0b33a92c5d4b4d72d4311487fff2a3/regex-2025.9.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4f6e935e98ea48c7a2e8be44494de337b57a204470e7f9c9c42f912c414cd6f5", size = 863363, upload-time = "2025-09-01T22:09:26.705Z" },
+ { url = "https://files.pythonhosted.org/packages/54/a9/2321eb3e2838f575a78d48e03c1e83ea61bd08b74b7ebbdeca8abc50fc25/regex-2025.9.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4a62d033cd9ebefc7c5e466731a508dfabee827d80b13f455de68a50d3c2543d", size = 910202, upload-time = "2025-09-01T22:09:28.906Z" },
+ { url = "https://files.pythonhosted.org/packages/33/07/d1d70835d7d11b7e126181f316f7213c4572ecf5c5c97bdbb969fb1f38a2/regex-2025.9.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef971ebf2b93bdc88d8337238be4dfb851cc97ed6808eb04870ef67589415171", size = 801808, upload-time = "2025-09-01T22:09:30.733Z" },
+ { url = "https://files.pythonhosted.org/packages/13/d1/29e4d1bed514ef2bf3a4ead3cb8bb88ca8af94130239a4e68aa765c35b1c/regex-2025.9.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d936a1db208bdca0eca1f2bb2c1ba1d8370b226785c1e6db76e32a228ffd0ad5", size = 786824, upload-time = "2025-09-01T22:09:32.61Z" },
+ { url = "https://files.pythonhosted.org/packages/33/27/20d8ccb1bee460faaa851e6e7cc4cfe852a42b70caa1dca22721ba19f02f/regex-2025.9.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:7e786d9e4469698fc63815b8de08a89165a0aa851720eb99f5e0ea9d51dd2b6a", size = 857406, upload-time = "2025-09-01T22:09:34.117Z" },
+ { url = "https://files.pythonhosted.org/packages/74/fe/60c6132262dc36430d51e0c46c49927d113d3a38c1aba6a26c7744c84cf3/regex-2025.9.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:6b81d7dbc5466ad2c57ce3a0ddb717858fe1a29535c8866f8514d785fdb9fc5b", size = 848593, upload-time = "2025-09-01T22:09:35.598Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/ae/2d4ff915622fabbef1af28387bf71e7f2f4944a348b8460d061e85e29bf0/regex-2025.9.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:cd4890e184a6feb0ef195338a6ce68906a8903a0f2eb7e0ab727dbc0a3156273", size = 787951, upload-time = "2025-09-01T22:09:37.139Z" },
+ { url = "https://files.pythonhosted.org/packages/85/37/dc127703a9e715a284cc2f7dbdd8a9776fd813c85c126eddbcbdd1ca5fec/regex-2025.9.1-cp314-cp314-win32.whl", hash = "sha256:34679a86230e46164c9e0396b56cab13c0505972343880b9e705083cc5b8ec86", size = 269833, upload-time = "2025-09-01T22:09:39.245Z" },
+ { url = "https://files.pythonhosted.org/packages/83/bf/4bed4d3d0570e16771defd5f8f15f7ea2311edcbe91077436d6908956c4a/regex-2025.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:a1196e530a6bfa5f4bde029ac5b0295a6ecfaaffbfffede4bbaf4061d9455b70", size = 278742, upload-time = "2025-09-01T22:09:40.651Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/3e/7d7ac6fd085023312421e0d69dfabdfb28e116e513fadbe9afe710c01893/regex-2025.9.1-cp314-cp314-win_arm64.whl", hash = "sha256:f46d525934871ea772930e997d577d48c6983e50f206ff7b66d4ac5f8941e993", size = 271860, upload-time = "2025-09-01T22:09:42.413Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "charset-normalizer" },
+ { name = "idna" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
+]
+
+[[package]]
+name = "rich"
+version = "14.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown-it-py" },
+ { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fe/75/af448d8e52bf1d8fa6a9d089ca6c07ff4453d86c65c145d0a300bb073b9b/rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8", size = 224441, upload-time = "2025-07-25T07:32:58.125Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" },
+]
+
+[[package]]
+name = "rpds-py"
+version = "0.27.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a5/ed/3aef893e2dd30e77e35d20d4ddb45ca459db59cead748cad9796ad479411/rpds_py-0.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:68afeec26d42ab3b47e541b272166a0b4400313946871cba3ed3a4fc0cab1cef", size = 371606, upload-time = "2025-08-27T12:12:25.189Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/82/9818b443e5d3eb4c83c3994561387f116aae9833b35c484474769c4a8faf/rpds_py-0.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74e5b2f7bb6fa38b1b10546d27acbacf2a022a8b5543efb06cfebc72a59c85be", size = 353452, upload-time = "2025-08-27T12:12:27.433Z" },
+ { url = "https://files.pythonhosted.org/packages/99/c7/d2a110ffaaa397fc6793a83c7bd3545d9ab22658b7cdff05a24a4535cc45/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9024de74731df54546fab0bfbcdb49fae19159ecaecfc8f37c18d2c7e2c0bd61", size = 381519, upload-time = "2025-08-27T12:12:28.719Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/bc/e89581d1f9d1be7d0247eaef602566869fdc0d084008ba139e27e775366c/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31d3ebadefcd73b73928ed0b2fd696f7fefda8629229f81929ac9c1854d0cffb", size = 394424, upload-time = "2025-08-27T12:12:30.207Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/2e/36a6861f797530e74bb6ed53495f8741f1ef95939eed01d761e73d559067/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2e7f8f169d775dd9092a1743768d771f1d1300453ddfe6325ae3ab5332b4657", size = 523467, upload-time = "2025-08-27T12:12:31.808Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/59/c1bc2be32564fa499f988f0a5c6505c2f4746ef96e58e4d7de5cf923d77e/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d905d16f77eb6ab2e324e09bfa277b4c8e5e6b8a78a3e7ff8f3cdf773b4c013", size = 402660, upload-time = "2025-08-27T12:12:33.444Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/ec/ef8bf895f0628dd0a59e54d81caed6891663cb9c54a0f4bb7da918cb88cf/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50c946f048209e6362e22576baea09193809f87687a95a8db24e5fbdb307b93a", size = 384062, upload-time = "2025-08-27T12:12:34.857Z" },
+ { url = "https://files.pythonhosted.org/packages/69/f7/f47ff154be8d9a5e691c083a920bba89cef88d5247c241c10b9898f595a1/rpds_py-0.27.1-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:3deab27804d65cd8289eb814c2c0e807c4b9d9916c9225e363cb0cf875eb67c1", size = 401289, upload-time = "2025-08-27T12:12:36.085Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/d9/ca410363efd0615814ae579f6829cafb39225cd63e5ea5ed1404cb345293/rpds_py-0.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b61097f7488de4be8244c89915da8ed212832ccf1e7c7753a25a394bf9b1f10", size = 417718, upload-time = "2025-08-27T12:12:37.401Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/a0/8cb5c2ff38340f221cc067cc093d1270e10658ba4e8d263df923daa18e86/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8a3f29aba6e2d7d90528d3c792555a93497fe6538aa65eb675b44505be747808", size = 558333, upload-time = "2025-08-27T12:12:38.672Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/8c/1b0de79177c5d5103843774ce12b84caa7164dfc6cd66378768d37db11bf/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd6cd0485b7d347304067153a6dc1d73f7d4fd995a396ef32a24d24b8ac63ac8", size = 589127, upload-time = "2025-08-27T12:12:41.48Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/5e/26abb098d5e01266b0f3a2488d299d19ccc26849735d9d2b95c39397e945/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f4461bf931108c9fa226ffb0e257c1b18dc2d44cd72b125bec50ee0ab1248a9", size = 554899, upload-time = "2025-08-27T12:12:42.925Z" },
+ { url = "https://files.pythonhosted.org/packages/de/41/905cc90ced13550db017f8f20c6d8e8470066c5738ba480d7ba63e3d136b/rpds_py-0.27.1-cp310-cp310-win32.whl", hash = "sha256:ee5422d7fb21f6a00c1901bf6559c49fee13a5159d0288320737bbf6585bd3e4", size = 217450, upload-time = "2025-08-27T12:12:44.813Z" },
+ { url = "https://files.pythonhosted.org/packages/75/3d/6bef47b0e253616ccdf67c283e25f2d16e18ccddd38f92af81d5a3420206/rpds_py-0.27.1-cp310-cp310-win_amd64.whl", hash = "sha256:3e039aabf6d5f83c745d5f9a0a381d031e9ed871967c0a5c38d201aca41f3ba1", size = 228447, upload-time = "2025-08-27T12:12:46.204Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/c1/7907329fbef97cbd49db6f7303893bd1dd5a4a3eae415839ffdfb0762cae/rpds_py-0.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:be898f271f851f68b318872ce6ebebbc62f303b654e43bf72683dbdc25b7c881", size = 371063, upload-time = "2025-08-27T12:12:47.856Z" },
+ { url = "https://files.pythonhosted.org/packages/11/94/2aab4bc86228bcf7c48760990273653a4900de89c7537ffe1b0d6097ed39/rpds_py-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62ac3d4e3e07b58ee0ddecd71d6ce3b1637de2d373501412df395a0ec5f9beb5", size = 353210, upload-time = "2025-08-27T12:12:49.187Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/57/f5eb3ecf434342f4f1a46009530e93fd201a0b5b83379034ebdb1d7c1a58/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4708c5c0ceb2d034f9991623631d3d23cb16e65c83736ea020cdbe28d57c0a0e", size = 381636, upload-time = "2025-08-27T12:12:50.492Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/f4/ef95c5945e2ceb5119571b184dd5a1cc4b8541bbdf67461998cfeac9cb1e/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:abfa1171a9952d2e0002aba2ad3780820b00cc3d9c98c6630f2e93271501f66c", size = 394341, upload-time = "2025-08-27T12:12:52.024Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/7e/4bd610754bf492d398b61725eb9598ddd5eb86b07d7d9483dbcd810e20bc/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b507d19f817ebaca79574b16eb2ae412e5c0835542c93fe9983f1e432aca195", size = 523428, upload-time = "2025-08-27T12:12:53.779Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/e5/059b9f65a8c9149361a8b75094864ab83b94718344db511fd6117936ed2a/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168b025f8fd8d8d10957405f3fdcef3dc20f5982d398f90851f4abc58c566c52", size = 402923, upload-time = "2025-08-27T12:12:55.15Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/48/64cabb7daced2968dd08e8a1b7988bf358d7bd5bcd5dc89a652f4668543c/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb56c6210ef77caa58e16e8c17d35c63fe3f5b60fd9ba9d424470c3400bcf9ed", size = 384094, upload-time = "2025-08-27T12:12:57.194Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/e1/dc9094d6ff566bff87add8a510c89b9e158ad2ecd97ee26e677da29a9e1b/rpds_py-0.27.1-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:d252f2d8ca0195faa707f8eb9368955760880b2b42a8ee16d382bf5dd807f89a", size = 401093, upload-time = "2025-08-27T12:12:58.985Z" },
+ { url = "https://files.pythonhosted.org/packages/37/8e/ac8577e3ecdd5593e283d46907d7011618994e1d7ab992711ae0f78b9937/rpds_py-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e5e54da1e74b91dbc7996b56640f79b195d5925c2b78efaa8c5d53e1d88edde", size = 417969, upload-time = "2025-08-27T12:13:00.367Z" },
+ { url = "https://files.pythonhosted.org/packages/66/6d/87507430a8f74a93556fe55c6485ba9c259949a853ce407b1e23fea5ba31/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ffce0481cc6e95e5b3f0a47ee17ffbd234399e6d532f394c8dce320c3b089c21", size = 558302, upload-time = "2025-08-27T12:13:01.737Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/bb/1db4781ce1dda3eecc735e3152659a27b90a02ca62bfeea17aee45cc0fbc/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a205fdfe55c90c2cd8e540ca9ceba65cbe6629b443bc05db1f590a3db8189ff9", size = 589259, upload-time = "2025-08-27T12:13:03.127Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/0e/ae1c8943d11a814d01b482e1f8da903f88047a962dff9bbdadf3bd6e6fd1/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:689fb5200a749db0415b092972e8eba85847c23885c8543a8b0f5c009b1a5948", size = 554983, upload-time = "2025-08-27T12:13:04.516Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/d5/0b2a55415931db4f112bdab072443ff76131b5ac4f4dc98d10d2d357eb03/rpds_py-0.27.1-cp311-cp311-win32.whl", hash = "sha256:3182af66048c00a075010bc7f4860f33913528a4b6fc09094a6e7598e462fe39", size = 217154, upload-time = "2025-08-27T12:13:06.278Z" },
+ { url = "https://files.pythonhosted.org/packages/24/75/3b7ffe0d50dc86a6a964af0d1cc3a4a2cdf437cb7b099a4747bbb96d1819/rpds_py-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4938466c6b257b2f5c4ff98acd8128ec36b5059e5c8f8372d79316b1c36bb15", size = 228627, upload-time = "2025-08-27T12:13:07.625Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/3f/4fd04c32abc02c710f09a72a30c9a55ea3cc154ef8099078fd50a0596f8e/rpds_py-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:2f57af9b4d0793e53266ee4325535a31ba48e2f875da81a9177c9926dfa60746", size = 220998, upload-time = "2025-08-27T12:13:08.972Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/fe/38de28dee5df58b8198c743fe2bea0c785c6d40941b9950bac4cdb71a014/rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90", size = 361887, upload-time = "2025-08-27T12:13:10.233Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/9a/4b6c7eedc7dd90986bf0fab6ea2a091ec11c01b15f8ba0a14d3f80450468/rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5", size = 345795, upload-time = "2025-08-27T12:13:11.65Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/0e/e650e1b81922847a09cca820237b0edee69416a01268b7754d506ade11ad/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e", size = 385121, upload-time = "2025-08-27T12:13:13.008Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/ea/b306067a712988e2bff00dcc7c8f31d26c29b6d5931b461aa4b60a013e33/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881", size = 398976, upload-time = "2025-08-27T12:13:14.368Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/0a/26dc43c8840cb8fe239fe12dbc8d8de40f2365e838f3d395835dde72f0e5/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec", size = 525953, upload-time = "2025-08-27T12:13:15.774Z" },
+ { url = "https://files.pythonhosted.org/packages/22/14/c85e8127b573aaf3a0cbd7fbb8c9c99e735a4a02180c84da2a463b766e9e/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb", size = 407915, upload-time = "2025-08-27T12:13:17.379Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/7b/8f4fee9ba1fb5ec856eb22d725a4efa3deb47f769597c809e03578b0f9d9/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5", size = 386883, upload-time = "2025-08-27T12:13:18.704Z" },
+ { url = "https://files.pythonhosted.org/packages/86/47/28fa6d60f8b74fcdceba81b272f8d9836ac0340570f68f5df6b41838547b/rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a", size = 405699, upload-time = "2025-08-27T12:13:20.089Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/fd/c5987b5e054548df56953a21fe2ebed51fc1ec7c8f24fd41c067b68c4a0a/rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444", size = 423713, upload-time = "2025-08-27T12:13:21.436Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/ba/3c4978b54a73ed19a7d74531be37a8bcc542d917c770e14d372b8daea186/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a", size = 562324, upload-time = "2025-08-27T12:13:22.789Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/6c/6943a91768fec16db09a42b08644b960cff540c66aab89b74be6d4a144ba/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1", size = 593646, upload-time = "2025-08-27T12:13:24.122Z" },
+ { url = "https://files.pythonhosted.org/packages/11/73/9d7a8f4be5f4396f011a6bb7a19fe26303a0dac9064462f5651ced2f572f/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998", size = 558137, upload-time = "2025-08-27T12:13:25.557Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/96/6772cbfa0e2485bcceef8071de7821f81aeac8bb45fbfd5542a3e8108165/rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39", size = 221343, upload-time = "2025-08-27T12:13:26.967Z" },
+ { url = "https://files.pythonhosted.org/packages/67/b6/c82f0faa9af1c6a64669f73a17ee0eeef25aff30bb9a1c318509efe45d84/rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594", size = 232497, upload-time = "2025-08-27T12:13:28.326Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/96/2817b44bd2ed11aebacc9251da03689d56109b9aba5e311297b6902136e2/rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502", size = 222790, upload-time = "2025-08-27T12:13:29.71Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741, upload-time = "2025-08-27T12:13:31.039Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574, upload-time = "2025-08-27T12:13:32.902Z" },
+ { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051, upload-time = "2025-08-27T12:13:34.228Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395, upload-time = "2025-08-27T12:13:36.132Z" },
+ { url = "https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334, upload-time = "2025-08-27T12:13:37.562Z" },
+ { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691, upload-time = "2025-08-27T12:13:38.94Z" },
+ { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868, upload-time = "2025-08-27T12:13:40.192Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469, upload-time = "2025-08-27T12:13:41.496Z" },
+ { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125, upload-time = "2025-08-27T12:13:42.802Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341, upload-time = "2025-08-27T12:13:44.472Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511, upload-time = "2025-08-27T12:13:45.898Z" },
+ { url = "https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736, upload-time = "2025-08-27T12:13:47.408Z" },
+ { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462, upload-time = "2025-08-27T12:13:48.742Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034, upload-time = "2025-08-27T12:13:50.11Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392, upload-time = "2025-08-27T12:13:52.587Z" },
+ { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355, upload-time = "2025-08-27T12:13:54.012Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138, upload-time = "2025-08-27T12:13:55.791Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247, upload-time = "2025-08-27T12:13:57.683Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699, upload-time = "2025-08-27T12:13:59.137Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852, upload-time = "2025-08-27T12:14:00.583Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582, upload-time = "2025-08-27T12:14:02.034Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126, upload-time = "2025-08-27T12:14:03.437Z" },
+ { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486, upload-time = "2025-08-27T12:14:05.443Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832, upload-time = "2025-08-27T12:14:06.902Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249, upload-time = "2025-08-27T12:14:08.37Z" },
+ { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356, upload-time = "2025-08-27T12:14:10.034Z" },
+ { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300, upload-time = "2025-08-27T12:14:11.783Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714, upload-time = "2025-08-27T12:14:13.629Z" },
+ { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943, upload-time = "2025-08-27T12:14:14.937Z" },
+ { url = "https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472, upload-time = "2025-08-27T12:14:16.333Z" },
+ { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676, upload-time = "2025-08-27T12:14:17.764Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313, upload-time = "2025-08-27T12:14:19.829Z" },
+ { url = "https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080, upload-time = "2025-08-27T12:14:21.531Z" },
+ { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868, upload-time = "2025-08-27T12:14:23.485Z" },
+ { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750, upload-time = "2025-08-27T12:14:24.924Z" },
+ { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688, upload-time = "2025-08-27T12:14:27.537Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225, upload-time = "2025-08-27T12:14:28.981Z" },
+ { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361, upload-time = "2025-08-27T12:14:30.469Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493, upload-time = "2025-08-27T12:14:31.987Z" },
+ { url = "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623, upload-time = "2025-08-27T12:14:33.543Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800, upload-time = "2025-08-27T12:14:35.436Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943, upload-time = "2025-08-27T12:14:36.898Z" },
+ { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739, upload-time = "2025-08-27T12:14:38.386Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120, upload-time = "2025-08-27T12:14:39.82Z" },
+ { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944, upload-time = "2025-08-27T12:14:41.199Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283, upload-time = "2025-08-27T12:14:42.699Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320, upload-time = "2025-08-27T12:14:44.157Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760, upload-time = "2025-08-27T12:14:45.845Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476, upload-time = "2025-08-27T12:14:47.364Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418, upload-time = "2025-08-27T12:14:49.991Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771, upload-time = "2025-08-27T12:14:52.159Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022, upload-time = "2025-08-27T12:14:53.859Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787, upload-time = "2025-08-27T12:14:55.673Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538, upload-time = "2025-08-27T12:14:57.245Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512, upload-time = "2025-08-27T12:14:58.728Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813, upload-time = "2025-08-27T12:15:00.334Z" },
+ { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385, upload-time = "2025-08-27T12:15:01.937Z" },
+ { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097, upload-time = "2025-08-27T12:15:03.961Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/63/b7cc415c345625d5e62f694ea356c58fb964861409008118f1245f8c3347/rpds_py-0.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7ba22cb9693df986033b91ae1d7a979bc399237d45fccf875b76f62bb9e52ddf", size = 371360, upload-time = "2025-08-27T12:15:29.218Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/8c/12e1b24b560cf378b8ffbdb9dc73abd529e1adcfcf82727dfd29c4a7b88d/rpds_py-0.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b640501be9288c77738b5492b3fd3abc4ba95c50c2e41273c8a1459f08298d3", size = 353933, upload-time = "2025-08-27T12:15:30.837Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/85/1bb2210c1f7a1b99e91fea486b9f0f894aa5da3a5ec7097cbad7dec6d40f/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb08b65b93e0c6dd70aac7f7890a9c0938d5ec71d5cb32d45cf844fb8ae47636", size = 382962, upload-time = "2025-08-27T12:15:32.348Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/c9/a839b9f219cf80ed65f27a7f5ddbb2809c1b85c966020ae2dff490e0b18e/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7ff07d696a7a38152ebdb8212ca9e5baab56656749f3d6004b34ab726b550b8", size = 394412, upload-time = "2025-08-27T12:15:33.839Z" },
+ { url = "https://files.pythonhosted.org/packages/02/2d/b1d7f928b0b1f4fc2e0133e8051d199b01d7384875adc63b6ddadf3de7e5/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb7c72262deae25366e3b6c0c0ba46007967aea15d1eea746e44ddba8ec58dcc", size = 523972, upload-time = "2025-08-27T12:15:35.377Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/af/2cbf56edd2d07716df1aec8a726b3159deb47cb5c27e1e42b71d705a7c2f/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b002cab05d6339716b03a4a3a2ce26737f6231d7b523f339fa061d53368c9d8", size = 403273, upload-time = "2025-08-27T12:15:37.051Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/93/425e32200158d44ff01da5d9612c3b6711fe69f606f06e3895511f17473b/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23f6b69d1c26c4704fec01311963a41d7de3ee0570a84ebde4d544e5a1859ffc", size = 385278, upload-time = "2025-08-27T12:15:38.571Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/1a/1a04a915ecd0551bfa9e77b7672d1937b4b72a0fc204a17deef76001cfb2/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:530064db9146b247351f2a0250b8f00b289accea4596a033e94be2389977de71", size = 402084, upload-time = "2025-08-27T12:15:40.529Z" },
+ { url = "https://files.pythonhosted.org/packages/51/f7/66585c0fe5714368b62951d2513b684e5215beaceab2c6629549ddb15036/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b90b0496570bd6b0321724a330d8b545827c4df2034b6ddfc5f5275f55da2ad", size = 419041, upload-time = "2025-08-27T12:15:42.191Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/7e/83a508f6b8e219bba2d4af077c35ba0e0cdd35a751a3be6a7cba5a55ad71/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879b0e14a2da6a1102a3fc8af580fc1ead37e6d6692a781bd8c83da37429b5ab", size = 560084, upload-time = "2025-08-27T12:15:43.839Z" },
+ { url = "https://files.pythonhosted.org/packages/66/66/bb945683b958a1b19eb0fe715594630d0f36396ebdef4d9b89c2fa09aa56/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:0d807710df3b5faa66c731afa162ea29717ab3be17bdc15f90f2d9f183da4059", size = 590115, upload-time = "2025-08-27T12:15:46.647Z" },
+ { url = "https://files.pythonhosted.org/packages/12/00/ccfaafaf7db7e7adace915e5c2f2c2410e16402561801e9c7f96683002d3/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3adc388fc3afb6540aec081fa59e6e0d3908722771aa1e37ffe22b220a436f0b", size = 556561, upload-time = "2025-08-27T12:15:48.219Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/b7/92b6ed9aad103bfe1c45df98453dfae40969eef2cb6c6239c58d7e96f1b3/rpds_py-0.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c796c0c1cc68cb08b0284db4229f5af76168172670c74908fdbd4b7d7f515819", size = 229125, upload-time = "2025-08-27T12:15:49.956Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/ed/e1fba02de17f4f76318b834425257c8ea297e415e12c68b4361f63e8ae92/rpds_py-0.27.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdfe4bb2f9fe7458b7453ad3c33e726d6d1c7c0a72960bcc23800d77384e42df", size = 371402, upload-time = "2025-08-27T12:15:51.561Z" },
+ { url = "https://files.pythonhosted.org/packages/af/7c/e16b959b316048b55585a697e94add55a4ae0d984434d279ea83442e460d/rpds_py-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8fabb8fd848a5f75a2324e4a84501ee3a5e3c78d8603f83475441866e60b94a3", size = 354084, upload-time = "2025-08-27T12:15:53.219Z" },
+ { url = "https://files.pythonhosted.org/packages/de/c1/ade645f55de76799fdd08682d51ae6724cb46f318573f18be49b1e040428/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda8719d598f2f7f3e0f885cba8646644b55a187762bec091fa14a2b819746a9", size = 383090, upload-time = "2025-08-27T12:15:55.158Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/27/89070ca9b856e52960da1472efcb6c20ba27cfe902f4f23ed095b9cfc61d/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c64d07e95606ec402a0a1c511fe003873fa6af630bda59bac77fac8b4318ebc", size = 394519, upload-time = "2025-08-27T12:15:57.238Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/28/be120586874ef906aa5aeeae95ae8df4184bc757e5b6bd1c729ccff45ed5/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93a2ed40de81bcff59aabebb626562d48332f3d028ca2036f1d23cbb52750be4", size = 523817, upload-time = "2025-08-27T12:15:59.237Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/ef/70cc197bc11cfcde02a86f36ac1eed15c56667c2ebddbdb76a47e90306da/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:387ce8c44ae94e0ec50532d9cb0edce17311024c9794eb196b90e1058aadeb66", size = 403240, upload-time = "2025-08-27T12:16:00.923Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/35/46936cca449f7f518f2f4996e0e8344db4b57e2081e752441154089d2a5f/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf94f812c95b5e60ebaf8bfb1898a7d7cb9c1af5744d4a67fa47796e0465d4e", size = 385194, upload-time = "2025-08-27T12:16:02.802Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/62/29c0d3e5125c3270b51415af7cbff1ec587379c84f55a5761cc9efa8cd06/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4848ca84d6ded9b58e474dfdbad4b8bfb450344c0551ddc8d958bf4b36aa837c", size = 402086, upload-time = "2025-08-27T12:16:04.806Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/66/03e1087679227785474466fdd04157fb793b3b76e3fcf01cbf4c693c1949/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2bde09cbcf2248b73c7c323be49b280180ff39fadcfe04e7b6f54a678d02a7cf", size = 419272, upload-time = "2025-08-27T12:16:06.471Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/24/e3e72d265121e00b063aef3e3501e5b2473cf1b23511d56e529531acf01e/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:94c44ee01fd21c9058f124d2d4f0c9dc7634bec93cd4b38eefc385dabe71acbf", size = 560003, upload-time = "2025-08-27T12:16:08.06Z" },
+ { url = "https://files.pythonhosted.org/packages/26/ca/f5a344c534214cc2d41118c0699fffbdc2c1bc7046f2a2b9609765ab9c92/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:df8b74962e35c9249425d90144e721eed198e6555a0e22a563d29fe4486b51f6", size = 590482, upload-time = "2025-08-27T12:16:10.137Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/08/4349bdd5c64d9d193c360aa9db89adeee6f6682ab8825dca0a3f535f434f/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dc23e6820e3b40847e2f4a7726462ba0cf53089512abe9ee16318c366494c17a", size = 556523, upload-time = "2025-08-27T12:16:12.188Z" },
+]
+
+[[package]]
+name = "ruff"
+version = "0.12.12"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a8/f0/e0965dd709b8cabe6356811c0ee8c096806bb57d20b5019eb4e48a117410/ruff-0.12.12.tar.gz", hash = "sha256:b86cd3415dbe31b3b46a71c598f4c4b2f550346d1ccf6326b347cc0c8fd063d6", size = 5359915, upload-time = "2025-09-04T16:50:18.273Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/09/79/8d3d687224d88367b51c7974cec1040c4b015772bfbeffac95face14c04a/ruff-0.12.12-py3-none-linux_armv6l.whl", hash = "sha256:de1c4b916d98ab289818e55ce481e2cacfaad7710b01d1f990c497edf217dafc", size = 12116602, upload-time = "2025-09-04T16:49:18.892Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/c3/6e599657fe192462f94861a09aae935b869aea8a1da07f47d6eae471397c/ruff-0.12.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7acd6045e87fac75a0b0cdedacf9ab3e1ad9d929d149785903cff9bb69ad9727", size = 12868393, upload-time = "2025-09-04T16:49:23.043Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/d2/9e3e40d399abc95336b1843f52fc0daaceb672d0e3c9290a28ff1a96f79d/ruff-0.12.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:abf4073688d7d6da16611f2f126be86523a8ec4343d15d276c614bda8ec44edb", size = 12036967, upload-time = "2025-09-04T16:49:26.04Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/03/6816b2ed08836be272e87107d905f0908be5b4a40c14bfc91043e76631b8/ruff-0.12.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:968e77094b1d7a576992ac078557d1439df678a34c6fe02fd979f973af167577", size = 12276038, upload-time = "2025-09-04T16:49:29.056Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/d5/707b92a61310edf358a389477eabd8af68f375c0ef858194be97ca5b6069/ruff-0.12.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42a67d16e5b1ffc6d21c5f67851e0e769517fb57a8ebad1d0781b30888aa704e", size = 11901110, upload-time = "2025-09-04T16:49:32.07Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/3d/f8b1038f4b9822e26ec3d5b49cf2bc313e3c1564cceb4c1a42820bf74853/ruff-0.12.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b216ec0a0674e4b1214dcc998a5088e54eaf39417327b19ffefba1c4a1e4971e", size = 13668352, upload-time = "2025-09-04T16:49:35.148Z" },
+ { url = "https://files.pythonhosted.org/packages/98/0e/91421368ae6c4f3765dd41a150f760c5f725516028a6be30e58255e3c668/ruff-0.12.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:59f909c0fdd8f1dcdbfed0b9569b8bf428cf144bec87d9de298dcd4723f5bee8", size = 14638365, upload-time = "2025-09-04T16:49:38.892Z" },
+ { url = "https://files.pythonhosted.org/packages/74/5d/88f3f06a142f58ecc8ecb0c2fe0b82343e2a2b04dcd098809f717cf74b6c/ruff-0.12.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ac93d87047e765336f0c18eacad51dad0c1c33c9df7484c40f98e1d773876f5", size = 14060812, upload-time = "2025-09-04T16:49:42.732Z" },
+ { url = "https://files.pythonhosted.org/packages/13/fc/8962e7ddd2e81863d5c92400820f650b86f97ff919c59836fbc4c1a6d84c/ruff-0.12.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:01543c137fd3650d322922e8b14cc133b8ea734617c4891c5a9fccf4bfc9aa92", size = 13050208, upload-time = "2025-09-04T16:49:46.434Z" },
+ { url = "https://files.pythonhosted.org/packages/53/06/8deb52d48a9a624fd37390555d9589e719eac568c020b27e96eed671f25f/ruff-0.12.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afc2fa864197634e549d87fb1e7b6feb01df0a80fd510d6489e1ce8c0b1cc45", size = 13311444, upload-time = "2025-09-04T16:49:49.931Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/81/de5a29af7eb8f341f8140867ffb93f82e4fde7256dadee79016ac87c2716/ruff-0.12.12-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0c0945246f5ad776cb8925e36af2438e66188d2b57d9cf2eed2c382c58b371e5", size = 13279474, upload-time = "2025-09-04T16:49:53.465Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/14/d9577fdeaf791737ada1b4f5c6b59c21c3326f3f683229096cccd7674e0c/ruff-0.12.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a0fbafe8c58e37aae28b84a80ba1817f2ea552e9450156018a478bf1fa80f4e4", size = 12070204, upload-time = "2025-09-04T16:49:56.882Z" },
+ { url = "https://files.pythonhosted.org/packages/77/04/a910078284b47fad54506dc0af13839c418ff704e341c176f64e1127e461/ruff-0.12.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b9c456fb2fc8e1282affa932c9e40f5ec31ec9cbb66751a316bd131273b57c23", size = 11880347, upload-time = "2025-09-04T16:49:59.729Z" },
+ { url = "https://files.pythonhosted.org/packages/df/58/30185fcb0e89f05e7ea82e5817b47798f7fa7179863f9d9ba6fd4fe1b098/ruff-0.12.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5f12856123b0ad0147d90b3961f5c90e7427f9acd4b40050705499c98983f489", size = 12891844, upload-time = "2025-09-04T16:50:02.591Z" },
+ { url = "https://files.pythonhosted.org/packages/21/9c/28a8dacce4855e6703dcb8cdf6c1705d0b23dd01d60150786cd55aa93b16/ruff-0.12.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:26a1b5a2bf7dd2c47e3b46d077cd9c0fc3b93e6c6cc9ed750bd312ae9dc302ee", size = 13360687, upload-time = "2025-09-04T16:50:05.8Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/fa/05b6428a008e60f79546c943e54068316f32ec8ab5c4f73e4563934fbdc7/ruff-0.12.12-py3-none-win32.whl", hash = "sha256:173be2bfc142af07a01e3a759aba6f7791aa47acf3604f610b1c36db888df7b1", size = 12052870, upload-time = "2025-09-04T16:50:09.121Z" },
+ { url = "https://files.pythonhosted.org/packages/85/60/d1e335417804df452589271818749d061b22772b87efda88354cf35cdb7a/ruff-0.12.12-py3-none-win_amd64.whl", hash = "sha256:e99620bf01884e5f38611934c09dd194eb665b0109104acae3ba6102b600fd0d", size = 13178016, upload-time = "2025-09-04T16:50:12.559Z" },
+ { url = "https://files.pythonhosted.org/packages/28/7e/61c42657f6e4614a4258f1c3b0c5b93adc4d1f8575f5229d1906b483099b/ruff-0.12.12-py3-none-win_arm64.whl", hash = "sha256:2a8199cab4ce4d72d158319b63370abf60991495fb733db96cd923a34c52d093", size = 12256762, upload-time = "2025-09-04T16:50:15.737Z" },
+]
+
+[[package]]
+name = "safehttpx"
+version = "0.1.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "httpx" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/67/4c/19db75e6405692b2a96af8f06d1258f8aa7290bdc35ac966f03e207f6d7f/safehttpx-0.1.6.tar.gz", hash = "sha256:b356bfc82cee3a24c395b94a2dbeabbed60aff1aa5fa3b5fe97c4f2456ebce42", size = 9987, upload-time = "2024-12-02T18:44:10.226Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/c0/1108ad9f01567f66b3154063605b350b69c3c9366732e09e45f9fd0d1deb/safehttpx-0.1.6-py3-none-any.whl", hash = "sha256:407cff0b410b071623087c63dd2080c3b44dc076888d8c5823c00d1e58cb381c", size = 8692, upload-time = "2024-12-02T18:44:08.555Z" },
+]
+
+[[package]]
+name = "safetensors"
+version = "0.6.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ac/cc/738f3011628920e027a11754d9cae9abec1aed00f7ae860abbf843755233/safetensors-0.6.2.tar.gz", hash = "sha256:43ff2aa0e6fa2dc3ea5524ac7ad93a9839256b8703761e76e2d0b2a3fa4f15d9", size = 197968, upload-time = "2025-08-08T13:13:58.654Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/b1/3f5fd73c039fc87dba3ff8b5d528bfc5a32b597fea8e7a6a4800343a17c7/safetensors-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:9c85ede8ec58f120bad982ec47746981e210492a6db876882aa021446af8ffba", size = 454797, upload-time = "2025-08-08T13:13:52.066Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/c9/bb114c158540ee17907ec470d01980957fdaf87b4aa07914c24eba87b9c6/safetensors-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d6675cf4b39c98dbd7d940598028f3742e0375a6b4d4277e76beb0c35f4b843b", size = 432206, upload-time = "2025-08-08T13:13:50.931Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/8e/f70c34e47df3110e8e0bb268d90db8d4be8958a54ab0336c9be4fe86dac8/safetensors-0.6.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d2d2b3ce1e2509c68932ca03ab8f20570920cd9754b05063d4368ee52833ecd", size = 473261, upload-time = "2025-08-08T13:13:41.259Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/f5/be9c6a7c7ef773e1996dc214e73485286df1836dbd063e8085ee1976f9cb/safetensors-0.6.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:93de35a18f46b0f5a6a1f9e26d91b442094f2df02e9fd7acf224cfec4238821a", size = 485117, upload-time = "2025-08-08T13:13:43.506Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/55/23f2d0a2c96ed8665bf17a30ab4ce5270413f4d74b6d87dd663258b9af31/safetensors-0.6.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89a89b505f335640f9120fac65ddeb83e40f1fd081cb8ed88b505bdccec8d0a1", size = 616154, upload-time = "2025-08-08T13:13:45.096Z" },
+ { url = "https://files.pythonhosted.org/packages/98/c6/affb0bd9ce02aa46e7acddbe087912a04d953d7a4d74b708c91b5806ef3f/safetensors-0.6.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc4d0d0b937e04bdf2ae6f70cd3ad51328635fe0e6214aa1fc811f3b576b3bda", size = 520713, upload-time = "2025-08-08T13:13:46.25Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/5d/5a514d7b88e310c8b146e2404e0dc161282e78634d9358975fd56dfd14be/safetensors-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8045db2c872db8f4cbe3faa0495932d89c38c899c603f21e9b6486951a5ecb8f", size = 485835, upload-time = "2025-08-08T13:13:49.373Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/7b/4fc3b2ba62c352b2071bea9cfbad330fadda70579f617506ae1a2f129cab/safetensors-0.6.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:81e67e8bab9878bb568cffbc5f5e655adb38d2418351dc0859ccac158f753e19", size = 521503, upload-time = "2025-08-08T13:13:47.651Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/50/0057e11fe1f3cead9254315a6c106a16dd4b1a19cd247f7cc6414f6b7866/safetensors-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0e4d029ab0a0e0e4fdf142b194514695b1d7d3735503ba700cf36d0fc7136ce", size = 652256, upload-time = "2025-08-08T13:13:53.167Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/29/473f789e4ac242593ac1656fbece6e1ecd860bb289e635e963667807afe3/safetensors-0.6.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:fa48268185c52bfe8771e46325a1e21d317207bcabcb72e65c6e28e9ffeb29c7", size = 747281, upload-time = "2025-08-08T13:13:54.656Z" },
+ { url = "https://files.pythonhosted.org/packages/68/52/f7324aad7f2df99e05525c84d352dc217e0fa637a4f603e9f2eedfbe2c67/safetensors-0.6.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:d83c20c12c2d2f465997c51b7ecb00e407e5f94d7dec3ea0cc11d86f60d3fde5", size = 692286, upload-time = "2025-08-08T13:13:55.884Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/fe/cad1d9762868c7c5dc70c8620074df28ebb1a8e4c17d4c0cb031889c457e/safetensors-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d944cea65fad0ead848b6ec2c37cc0b197194bec228f8020054742190e9312ac", size = 655957, upload-time = "2025-08-08T13:13:57.029Z" },
+ { url = "https://files.pythonhosted.org/packages/59/a7/e2158e17bbe57d104f0abbd95dff60dda916cf277c9f9663b4bf9bad8b6e/safetensors-0.6.2-cp38-abi3-win32.whl", hash = "sha256:cab75ca7c064d3911411461151cb69380c9225798a20e712b102edda2542ddb1", size = 308926, upload-time = "2025-08-08T13:14:01.095Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/c3/c0be1135726618dc1e28d181b8c442403d8dbb9e273fd791de2d4384bcdd/safetensors-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:c7b214870df923cbc1593c3faee16bec59ea462758699bd3fee399d00aac072c", size = 320192, upload-time = "2025-08-08T13:13:59.467Z" },
+]
+
+[[package]]
+name = "scipy"
+version = "1.15.3"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.11' and sys_platform == 'linux'",
+ "python_full_version < '3.11' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "numpy", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0f/37/6964b830433e654ec7485e45a00fc9a27cf868d622838f6b6d9c5ec0d532/scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf", size = 59419214, upload-time = "2025-05-08T16:13:05.955Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/78/2f/4966032c5f8cc7e6a60f1b2e0ad686293b9474b65246b0c642e3ef3badd0/scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c", size = 38702770, upload-time = "2025-05-08T16:04:20.849Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/6e/0c3bf90fae0e910c274db43304ebe25a6b391327f3f10b5dcc638c090795/scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253", size = 30094511, upload-time = "2025-05-08T16:04:27.103Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/b1/4deb37252311c1acff7f101f6453f0440794f51b6eacb1aad4459a134081/scipy-1.15.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:aef683a9ae6eb00728a542b796f52a5477b78252edede72b8327a886ab63293f", size = 22368151, upload-time = "2025-05-08T16:04:31.731Z" },
+ { url = "https://files.pythonhosted.org/packages/38/7d/f457626e3cd3c29b3a49ca115a304cebb8cc6f31b04678f03b216899d3c6/scipy-1.15.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:1c832e1bd78dea67d5c16f786681b28dd695a8cb1fb90af2e27580d3d0967e92", size = 25121732, upload-time = "2025-05-08T16:04:36.596Z" },
+ { url = "https://files.pythonhosted.org/packages/db/0a/92b1de4a7adc7a15dcf5bddc6e191f6f29ee663b30511ce20467ef9b82e4/scipy-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:263961f658ce2165bbd7b99fa5135195c3a12d9bef045345016b8b50c315cb82", size = 35547617, upload-time = "2025-05-08T16:04:43.546Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/6d/41991e503e51fc1134502694c5fa7a1671501a17ffa12716a4a9151af3df/scipy-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2abc762b0811e09a0d3258abee2d98e0c703eee49464ce0069590846f31d40", size = 37662964, upload-time = "2025-05-08T16:04:49.431Z" },
+ { url = "https://files.pythonhosted.org/packages/25/e1/3df8f83cb15f3500478c889be8fb18700813b95e9e087328230b98d547ff/scipy-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed7284b21a7a0c8f1b6e5977ac05396c0d008b89e05498c8b7e8f4a1423bba0e", size = 37238749, upload-time = "2025-05-08T16:04:55.215Z" },
+ { url = "https://files.pythonhosted.org/packages/93/3e/b3257cf446f2a3533ed7809757039016b74cd6f38271de91682aa844cfc5/scipy-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5380741e53df2c566f4d234b100a484b420af85deb39ea35a1cc1be84ff53a5c", size = 40022383, upload-time = "2025-05-08T16:05:01.914Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/84/55bc4881973d3f79b479a5a2e2df61c8c9a04fcb986a213ac9c02cfb659b/scipy-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:9d61e97b186a57350f6d6fd72640f9e99d5a4a2b8fbf4b9ee9a841eab327dc13", size = 41259201, upload-time = "2025-05-08T16:05:08.166Z" },
+ { url = "https://files.pythonhosted.org/packages/96/ab/5cc9f80f28f6a7dff646c5756e559823614a42b1939d86dd0ed550470210/scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b", size = 38714255, upload-time = "2025-05-08T16:05:14.596Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/4a/66ba30abe5ad1a3ad15bfb0b59d22174012e8056ff448cb1644deccbfed2/scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba", size = 30111035, upload-time = "2025-05-08T16:05:20.152Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/fa/a7e5b95afd80d24313307f03624acc65801846fa75599034f8ceb9e2cbf6/scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65", size = 22384499, upload-time = "2025-05-08T16:05:24.494Z" },
+ { url = "https://files.pythonhosted.org/packages/17/99/f3aaddccf3588bb4aea70ba35328c204cadd89517a1612ecfda5b2dd9d7a/scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1", size = 25152602, upload-time = "2025-05-08T16:05:29.313Z" },
+ { url = "https://files.pythonhosted.org/packages/56/c5/1032cdb565f146109212153339f9cb8b993701e9fe56b1c97699eee12586/scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889", size = 35503415, upload-time = "2025-05-08T16:05:34.699Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/37/89f19c8c05505d0601ed5650156e50eb881ae3918786c8fd7262b4ee66d3/scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982", size = 37652622, upload-time = "2025-05-08T16:05:40.762Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/31/be59513aa9695519b18e1851bb9e487de66f2d31f835201f1b42f5d4d475/scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9", size = 37244796, upload-time = "2025-05-08T16:05:48.119Z" },
+ { url = "https://files.pythonhosted.org/packages/10/c0/4f5f3eeccc235632aab79b27a74a9130c6c35df358129f7ac8b29f562ac7/scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594", size = 40047684, upload-time = "2025-05-08T16:05:54.22Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/a7/0ddaf514ce8a8714f6ed243a2b391b41dbb65251affe21ee3077ec45ea9a/scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb", size = 41246504, upload-time = "2025-05-08T16:06:00.437Z" },
+ { url = "https://files.pythonhosted.org/packages/37/4b/683aa044c4162e10ed7a7ea30527f2cbd92e6999c10a8ed8edb253836e9c/scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019", size = 38766735, upload-time = "2025-05-08T16:06:06.471Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/7e/f30be3d03de07f25dc0ec926d1681fed5c732d759ac8f51079708c79e680/scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6", size = 30173284, upload-time = "2025-05-08T16:06:11.686Z" },
+ { url = "https://files.pythonhosted.org/packages/07/9c/0ddb0d0abdabe0d181c1793db51f02cd59e4901da6f9f7848e1f96759f0d/scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477", size = 22446958, upload-time = "2025-05-08T16:06:15.97Z" },
+ { url = "https://files.pythonhosted.org/packages/af/43/0bce905a965f36c58ff80d8bea33f1f9351b05fad4beaad4eae34699b7a1/scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c", size = 25242454, upload-time = "2025-05-08T16:06:20.394Z" },
+ { url = "https://files.pythonhosted.org/packages/56/30/a6f08f84ee5b7b28b4c597aca4cbe545535c39fe911845a96414700b64ba/scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45", size = 35210199, upload-time = "2025-05-08T16:06:26.159Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/1f/03f52c282437a168ee2c7c14a1a0d0781a9a4a8962d84ac05c06b4c5b555/scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49", size = 37309455, upload-time = "2025-05-08T16:06:32.778Z" },
+ { url = "https://files.pythonhosted.org/packages/89/b1/fbb53137f42c4bf630b1ffdfc2151a62d1d1b903b249f030d2b1c0280af8/scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e", size = 36885140, upload-time = "2025-05-08T16:06:39.249Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/2e/025e39e339f5090df1ff266d021892694dbb7e63568edcfe43f892fa381d/scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539", size = 39710549, upload-time = "2025-05-08T16:06:45.729Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/eb/3bf6ea8ab7f1503dca3a10df2e4b9c3f6b3316df07f6c0ded94b281c7101/scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed", size = 40966184, upload-time = "2025-05-08T16:06:52.623Z" },
+ { url = "https://files.pythonhosted.org/packages/73/18/ec27848c9baae6e0d6573eda6e01a602e5649ee72c27c3a8aad673ebecfd/scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759", size = 38728256, upload-time = "2025-05-08T16:06:58.696Z" },
+ { url = "https://files.pythonhosted.org/packages/74/cd/1aef2184948728b4b6e21267d53b3339762c285a46a274ebb7863c9e4742/scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62", size = 30109540, upload-time = "2025-05-08T16:07:04.209Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/d8/59e452c0a255ec352bd0a833537a3bc1bfb679944c4938ab375b0a6b3a3e/scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb", size = 22383115, upload-time = "2025-05-08T16:07:08.998Z" },
+ { url = "https://files.pythonhosted.org/packages/08/f5/456f56bbbfccf696263b47095291040655e3cbaf05d063bdc7c7517f32ac/scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730", size = 25163884, upload-time = "2025-05-08T16:07:14.091Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/66/a9618b6a435a0f0c0b8a6d0a2efb32d4ec5a85f023c2b79d39512040355b/scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825", size = 35174018, upload-time = "2025-05-08T16:07:19.427Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/09/c5b6734a50ad4882432b6bb7c02baf757f5b2f256041da5df242e2d7e6b6/scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7", size = 37269716, upload-time = "2025-05-08T16:07:25.712Z" },
+ { url = "https://files.pythonhosted.org/packages/77/0a/eac00ff741f23bcabd352731ed9b8995a0a60ef57f5fd788d611d43d69a1/scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11", size = 36872342, upload-time = "2025-05-08T16:07:31.468Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/54/4379be86dd74b6ad81551689107360d9a3e18f24d20767a2d5b9253a3f0a/scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126", size = 39670869, upload-time = "2025-05-08T16:07:38.002Z" },
+ { url = "https://files.pythonhosted.org/packages/87/2e/892ad2862ba54f084ffe8cc4a22667eaf9c2bcec6d2bff1d15713c6c0703/scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163", size = 40988851, upload-time = "2025-05-08T16:08:33.671Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/e9/7a879c137f7e55b30d75d90ce3eb468197646bc7b443ac036ae3fe109055/scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8", size = 38863011, upload-time = "2025-05-08T16:07:44.039Z" },
+ { url = "https://files.pythonhosted.org/packages/51/d1/226a806bbd69f62ce5ef5f3ffadc35286e9fbc802f606a07eb83bf2359de/scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5", size = 30266407, upload-time = "2025-05-08T16:07:49.891Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/9b/f32d1d6093ab9eeabbd839b0f7619c62e46cc4b7b6dbf05b6e615bbd4400/scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e", size = 22540030, upload-time = "2025-05-08T16:07:54.121Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/29/c278f699b095c1a884f29fda126340fcc201461ee8bfea5c8bdb1c7c958b/scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb", size = 25218709, upload-time = "2025-05-08T16:07:58.506Z" },
+ { url = "https://files.pythonhosted.org/packages/24/18/9e5374b617aba742a990581373cd6b68a2945d65cc588482749ef2e64467/scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723", size = 34809045, upload-time = "2025-05-08T16:08:03.929Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/fe/9c4361e7ba2927074360856db6135ef4904d505e9b3afbbcb073c4008328/scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb", size = 36703062, upload-time = "2025-05-08T16:08:09.558Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/8e/038ccfe29d272b30086b25a4960f757f97122cb2ec42e62b460d02fe98e9/scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4", size = 36393132, upload-time = "2025-05-08T16:08:15.34Z" },
+ { url = "https://files.pythonhosted.org/packages/10/7e/5c12285452970be5bdbe8352c619250b97ebf7917d7a9a9e96b8a8140f17/scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5", size = 38979503, upload-time = "2025-05-08T16:08:21.513Z" },
+ { url = "https://files.pythonhosted.org/packages/81/06/0a5e5349474e1cbc5757975b21bd4fad0e72ebf138c5592f191646154e06/scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca", size = 40308097, upload-time = "2025-05-08T16:08:27.627Z" },
+]
+
+[[package]]
+name = "scipy"
+version = "1.16.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.13' and sys_platform == 'linux'",
+ "python_full_version == '3.12.*' and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and sys_platform == 'linux'",
+ "python_full_version >= '3.13' and sys_platform != 'linux'",
+ "python_full_version == '3.12.*' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "numpy", marker = "python_full_version >= '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f5/4a/b927028464795439faec8eaf0b03b011005c487bb2d07409f28bf30879c4/scipy-1.16.1.tar.gz", hash = "sha256:44c76f9e8b6e8e488a586190ab38016e4ed2f8a038af7cd3defa903c0a2238b3", size = 30580861, upload-time = "2025-07-27T16:33:30.834Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/da/91/812adc6f74409b461e3a5fa97f4f74c769016919203138a3bf6fc24ba4c5/scipy-1.16.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:c033fa32bab91dc98ca59d0cf23bb876454e2bb02cbe592d5023138778f70030", size = 36552519, upload-time = "2025-07-27T16:26:29.658Z" },
+ { url = "https://files.pythonhosted.org/packages/47/18/8e355edcf3b71418d9e9f9acd2708cc3a6c27e8f98fde0ac34b8a0b45407/scipy-1.16.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:6e5c2f74e5df33479b5cd4e97a9104c511518fbd979aa9b8f6aec18b2e9ecae7", size = 28638010, upload-time = "2025-07-27T16:26:38.196Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/eb/e931853058607bdfbc11b86df19ae7a08686121c203483f62f1ecae5989c/scipy-1.16.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0a55ffe0ba0f59666e90951971a884d1ff6f4ec3275a48f472cfb64175570f77", size = 20909790, upload-time = "2025-07-27T16:26:43.93Z" },
+ { url = "https://files.pythonhosted.org/packages/45/0c/be83a271d6e96750cd0be2e000f35ff18880a46f05ce8b5d3465dc0f7a2a/scipy-1.16.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:f8a5d6cd147acecc2603fbd382fed6c46f474cccfcf69ea32582e033fb54dcfe", size = 23513352, upload-time = "2025-07-27T16:26:50.017Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/bf/fe6eb47e74f762f933cca962db7f2c7183acfdc4483bd1c3813cfe83e538/scipy-1.16.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cb18899127278058bcc09e7b9966d41a5a43740b5bb8dcba401bd983f82e885b", size = 33534643, upload-time = "2025-07-27T16:26:57.503Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/ba/63f402e74875486b87ec6506a4f93f6d8a0d94d10467280f3d9d7837ce3a/scipy-1.16.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adccd93a2fa937a27aae826d33e3bfa5edf9aa672376a4852d23a7cd67a2e5b7", size = 35376776, upload-time = "2025-07-27T16:27:06.639Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/b4/04eb9d39ec26a1b939689102da23d505ea16cdae3dbb18ffc53d1f831044/scipy-1.16.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:18aca1646a29ee9a0625a1be5637fa798d4d81fdf426481f06d69af828f16958", size = 35698906, upload-time = "2025-07-27T16:27:14.943Z" },
+ { url = "https://files.pythonhosted.org/packages/04/d6/bb5468da53321baeb001f6e4e0d9049eadd175a4a497709939128556e3ec/scipy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d85495cef541729a70cdddbbf3e6b903421bc1af3e8e3a9a72a06751f33b7c39", size = 38129275, upload-time = "2025-07-27T16:27:23.873Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/94/994369978509f227cba7dfb9e623254d0d5559506fe994aef4bea3ed469c/scipy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:226652fca853008119c03a8ce71ffe1b3f6d2844cc1686e8f9806edafae68596", size = 38644572, upload-time = "2025-07-27T16:27:32.637Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/d9/ec4864f5896232133f51382b54a08de91a9d1af7a76dfa372894026dfee2/scipy-1.16.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81b433bbeaf35728dad619afc002db9b189e45eebe2cd676effe1fb93fef2b9c", size = 36575194, upload-time = "2025-07-27T16:27:41.321Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/6d/40e81ecfb688e9d25d34a847dca361982a6addf8e31f0957b1a54fbfa994/scipy-1.16.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:886cc81fdb4c6903a3bb0464047c25a6d1016fef77bb97949817d0c0d79f9e04", size = 28594590, upload-time = "2025-07-27T16:27:49.204Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/37/9f65178edfcc629377ce9a64fc09baebea18c80a9e57ae09a52edf84880b/scipy-1.16.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:15240c3aac087a522b4eaedb09f0ad061753c5eebf1ea430859e5bf8640d5919", size = 20866458, upload-time = "2025-07-27T16:27:54.98Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/7b/749a66766871ea4cb1d1ea10f27004db63023074c22abed51f22f09770e0/scipy-1.16.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:65f81a25805f3659b48126b5053d9e823d3215e4a63730b5e1671852a1705921", size = 23539318, upload-time = "2025-07-27T16:28:01.604Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/db/8d4afec60eb833a666434d4541a3151eedbf2494ea6d4d468cbe877f00cd/scipy-1.16.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6c62eea7f607f122069b9bad3f99489ddca1a5173bef8a0c75555d7488b6f725", size = 33292899, upload-time = "2025-07-27T16:28:09.147Z" },
+ { url = "https://files.pythonhosted.org/packages/51/1e/79023ca3bbb13a015d7d2757ecca3b81293c663694c35d6541b4dca53e98/scipy-1.16.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f965bbf3235b01c776115ab18f092a95aa74c271a52577bcb0563e85738fd618", size = 35162637, upload-time = "2025-07-27T16:28:17.535Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/49/0648665f9c29fdaca4c679182eb972935b3b4f5ace41d323c32352f29816/scipy-1.16.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f006e323874ffd0b0b816d8c6a8e7f9a73d55ab3b8c3f72b752b226d0e3ac83d", size = 35490507, upload-time = "2025-07-27T16:28:25.705Z" },
+ { url = "https://files.pythonhosted.org/packages/62/8f/66cbb9d6bbb18d8c658f774904f42a92078707a7c71e5347e8bf2f52bb89/scipy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8fd15fc5085ab4cca74cb91fe0a4263b1f32e4420761ddae531ad60934c2119", size = 37923998, upload-time = "2025-07-27T16:28:34.339Z" },
+ { url = "https://files.pythonhosted.org/packages/14/c3/61f273ae550fbf1667675701112e380881905e28448c080b23b5a181df7c/scipy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:f7b8013c6c066609577d910d1a2a077021727af07b6fab0ee22c2f901f22352a", size = 38508060, upload-time = "2025-07-27T16:28:43.242Z" },
+ { url = "https://files.pythonhosted.org/packages/93/0b/b5c99382b839854a71ca9482c684e3472badc62620287cbbdab499b75ce6/scipy-1.16.1-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:5451606823a5e73dfa621a89948096c6528e2896e40b39248295d3a0138d594f", size = 36533717, upload-time = "2025-07-27T16:28:51.706Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/e5/69ab2771062c91e23e07c12e7d5033a6b9b80b0903ee709c3c36b3eb520c/scipy-1.16.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:89728678c5ca5abd610aee148c199ac1afb16e19844401ca97d43dc548a354eb", size = 28570009, upload-time = "2025-07-27T16:28:57.017Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/69/bd75dbfdd3cf524f4d753484d723594aed62cfaac510123e91a6686d520b/scipy-1.16.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e756d688cb03fd07de0fffad475649b03cb89bee696c98ce508b17c11a03f95c", size = 20841942, upload-time = "2025-07-27T16:29:01.152Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/74/add181c87663f178ba7d6144b370243a87af8476664d5435e57d599e6874/scipy-1.16.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5aa2687b9935da3ed89c5dbed5234576589dd28d0bf7cd237501ccfbdf1ad608", size = 23498507, upload-time = "2025-07-27T16:29:05.202Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/74/ece2e582a0d9550cee33e2e416cc96737dce423a994d12bbe59716f47ff1/scipy-1.16.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0851f6a1e537fe9399f35986897e395a1aa61c574b178c0d456be5b1a0f5ca1f", size = 33286040, upload-time = "2025-07-27T16:29:10.201Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/82/08e4076df538fb56caa1d489588d880ec7c52d8273a606bb54d660528f7c/scipy-1.16.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fedc2cbd1baed37474b1924c331b97bdff611d762c196fac1a9b71e67b813b1b", size = 35176096, upload-time = "2025-07-27T16:29:17.091Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/79/cd710aab8c921375711a8321c6be696e705a120e3011a643efbbcdeeabcc/scipy-1.16.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2ef500e72f9623a6735769e4b93e9dcb158d40752cdbb077f305487e3e2d1f45", size = 35490328, upload-time = "2025-07-27T16:29:22.928Z" },
+ { url = "https://files.pythonhosted.org/packages/71/73/e9cc3d35ee4526d784520d4494a3e1ca969b071fb5ae5910c036a375ceec/scipy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:978d8311674b05a8f7ff2ea6c6bce5d8b45a0cb09d4c5793e0318f448613ea65", size = 37939921, upload-time = "2025-07-27T16:29:29.108Z" },
+ { url = "https://files.pythonhosted.org/packages/21/12/c0efd2941f01940119b5305c375ae5c0fcb7ec193f806bd8f158b73a1782/scipy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:81929ed0fa7a5713fcdd8b2e6f73697d3b4c4816d090dd34ff937c20fa90e8ab", size = 38479462, upload-time = "2025-07-27T16:30:24.078Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/19/c3d08b675260046a991040e1ea5d65f91f40c7df1045fffff412dcfc6765/scipy-1.16.1-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:bcc12db731858abda693cecdb3bdc9e6d4bd200213f49d224fe22df82687bdd6", size = 36938832, upload-time = "2025-07-27T16:29:35.057Z" },
+ { url = "https://files.pythonhosted.org/packages/81/f2/ce53db652c033a414a5b34598dba6b95f3d38153a2417c5a3883da429029/scipy-1.16.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:744d977daa4becb9fc59135e75c069f8d301a87d64f88f1e602a9ecf51e77b27", size = 29093084, upload-time = "2025-07-27T16:29:40.201Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/ae/7a10ff04a7dc15f9057d05b33737ade244e4bd195caa3f7cc04d77b9e214/scipy-1.16.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:dc54f76ac18073bcecffb98d93f03ed6b81a92ef91b5d3b135dcc81d55a724c7", size = 21365098, upload-time = "2025-07-27T16:29:44.295Z" },
+ { url = "https://files.pythonhosted.org/packages/36/ac/029ff710959932ad3c2a98721b20b405f05f752f07344622fd61a47c5197/scipy-1.16.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:367d567ee9fc1e9e2047d31f39d9d6a7a04e0710c86e701e053f237d14a9b4f6", size = 23896858, upload-time = "2025-07-27T16:29:48.784Z" },
+ { url = "https://files.pythonhosted.org/packages/71/13/d1ef77b6bd7898720e1f0b6b3743cb945f6c3cafa7718eaac8841035ab60/scipy-1.16.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4cf5785e44e19dcd32a0e4807555e1e9a9b8d475c6afff3d21c3c543a6aa84f4", size = 33438311, upload-time = "2025-07-27T16:29:54.164Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/e0/e64a6821ffbb00b4c5b05169f1c1fddb4800e9307efe3db3788995a82a2c/scipy-1.16.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3d0b80fb26d3e13a794c71d4b837e2a589d839fd574a6bbb4ee1288c213ad4a3", size = 35279542, upload-time = "2025-07-27T16:30:00.249Z" },
+ { url = "https://files.pythonhosted.org/packages/57/59/0dc3c8b43e118f1e4ee2b798dcc96ac21bb20014e5f1f7a8e85cc0653bdb/scipy-1.16.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8503517c44c18d1030d666cb70aaac1cc8913608816e06742498833b128488b7", size = 35667665, upload-time = "2025-07-27T16:30:05.916Z" },
+ { url = "https://files.pythonhosted.org/packages/45/5f/844ee26e34e2f3f9f8febb9343748e72daeaec64fe0c70e9bf1ff84ec955/scipy-1.16.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:30cc4bb81c41831ecfd6dc450baf48ffd80ef5aed0f5cf3ea775740e80f16ecc", size = 38045210, upload-time = "2025-07-27T16:30:11.655Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/d7/210f2b45290f444f1de64bc7353aa598ece9f0e90c384b4a156f9b1a5063/scipy-1.16.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c24fa02f7ed23ae514460a22c57eca8f530dbfa50b1cfdbf4f37c05b5309cc39", size = 38593661, upload-time = "2025-07-27T16:30:17.825Z" },
+ { url = "https://files.pythonhosted.org/packages/81/ea/84d481a5237ed223bd3d32d6e82d7a6a96e34756492666c260cef16011d1/scipy-1.16.1-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:796a5a9ad36fa3a782375db8f4241ab02a091308eb079746bc0f874c9b998318", size = 36525921, upload-time = "2025-07-27T16:30:30.081Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/9f/d9edbdeff9f3a664807ae3aea383e10afaa247e8e6255e6d2aa4515e8863/scipy-1.16.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:3ea0733a2ff73fd6fdc5fecca54ee9b459f4d74f00b99aced7d9a3adb43fb1cc", size = 28564152, upload-time = "2025-07-27T16:30:35.336Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/95/8125bcb1fe04bc267d103e76516243e8d5e11229e6b306bda1024a5423d1/scipy-1.16.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:85764fb15a2ad994e708258bb4ed8290d1305c62a4e1ef07c414356a24fcfbf8", size = 20836028, upload-time = "2025-07-27T16:30:39.421Z" },
+ { url = "https://files.pythonhosted.org/packages/77/9c/bf92e215701fc70bbcd3d14d86337cf56a9b912a804b9c776a269524a9e9/scipy-1.16.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:ca66d980469cb623b1759bdd6e9fd97d4e33a9fad5b33771ced24d0cb24df67e", size = 23489666, upload-time = "2025-07-27T16:30:43.663Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/00/5e941d397d9adac41b02839011594620d54d99488d1be5be755c00cde9ee/scipy-1.16.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e7cc1ffcc230f568549fc56670bcf3df1884c30bd652c5da8138199c8c76dae0", size = 33358318, upload-time = "2025-07-27T16:30:48.982Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/87/8db3aa10dde6e3e8e7eb0133f24baa011377d543f5b19c71469cf2648026/scipy-1.16.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ddfb1e8d0b540cb4ee9c53fc3dea3186f97711248fb94b4142a1b27178d8b4b", size = 35185724, upload-time = "2025-07-27T16:30:54.26Z" },
+ { url = "https://files.pythonhosted.org/packages/89/b4/6ab9ae443216807622bcff02690262d8184078ea467efee2f8c93288a3b1/scipy-1.16.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4dc0e7be79e95d8ba3435d193e0d8ce372f47f774cffd882f88ea4e1e1ddc731", size = 35554335, upload-time = "2025-07-27T16:30:59.765Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/9a/d0e9dc03c5269a1afb60661118296a32ed5d2c24298af61b676c11e05e56/scipy-1.16.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f23634f9e5adb51b2a77766dac217063e764337fbc816aa8ad9aaebcd4397fd3", size = 37960310, upload-time = "2025-07-27T16:31:06.151Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/00/c8f3130a50521a7977874817ca89e0599b1b4ee8e938bad8ae798a0e1f0d/scipy-1.16.1-cp314-cp314-win_amd64.whl", hash = "sha256:57d75524cb1c5a374958a2eae3d84e1929bb971204cc9d52213fb8589183fc19", size = 39319239, upload-time = "2025-07-27T16:31:59.942Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/f2/1ca3eda54c3a7e4c92f6acef7db7b3a057deb135540d23aa6343ef8ad333/scipy-1.16.1-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:d8da7c3dd67bcd93f15618938f43ed0995982eb38973023d46d4646c4283ad65", size = 36939460, upload-time = "2025-07-27T16:31:11.865Z" },
+ { url = "https://files.pythonhosted.org/packages/80/30/98c2840b293a132400c0940bb9e140171dcb8189588619048f42b2ce7b4f/scipy-1.16.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:cc1d2f2fd48ba1e0620554fe5bc44d3e8f5d4185c8c109c7fbdf5af2792cfad2", size = 29093322, upload-time = "2025-07-27T16:31:17.045Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/e6/1e6e006e850622cf2a039b62d1a6ddc4497d4851e58b68008526f04a9a00/scipy-1.16.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:21a611ced9275cb861bacadbada0b8c0623bc00b05b09eb97f23b370fc2ae56d", size = 21365329, upload-time = "2025-07-27T16:31:21.188Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/02/72a5aa5b820589dda9a25e329ca752842bfbbaf635e36bc7065a9b42216e/scipy-1.16.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dfbb25dffc4c3dd9371d8ab456ca81beeaf6f9e1c2119f179392f0dc1ab7695", size = 23897544, upload-time = "2025-07-27T16:31:25.408Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/dc/7122d806a6f9eb8a33532982234bed91f90272e990f414f2830cfe656e0b/scipy-1.16.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f0ebb7204f063fad87fc0a0e4ff4a2ff40b2a226e4ba1b7e34bf4b79bf97cd86", size = 33442112, upload-time = "2025-07-27T16:31:30.62Z" },
+ { url = "https://files.pythonhosted.org/packages/24/39/e383af23564daa1021a5b3afbe0d8d6a68ec639b943661841f44ac92de85/scipy-1.16.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f1b9e5962656f2734c2b285a8745358ecb4e4efbadd00208c80a389227ec61ff", size = 35286594, upload-time = "2025-07-27T16:31:36.112Z" },
+ { url = "https://files.pythonhosted.org/packages/95/47/1a0b0aff40c3056d955f38b0df5d178350c3d74734ec54f9c68d23910be5/scipy-1.16.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e1a106f8c023d57a2a903e771228bf5c5b27b5d692088f457acacd3b54511e4", size = 35665080, upload-time = "2025-07-27T16:31:42.025Z" },
+ { url = "https://files.pythonhosted.org/packages/64/df/ce88803e9ed6e27fe9b9abefa157cf2c80e4fa527cf17ee14be41f790ad4/scipy-1.16.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:709559a1db68a9abc3b2c8672c4badf1614f3b440b3ab326d86a5c0491eafae3", size = 38050306, upload-time = "2025-07-27T16:31:48.109Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/6c/a76329897a7cae4937d403e623aa6aaea616a0bb5b36588f0b9d1c9a3739/scipy-1.16.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c0c804d60492a0aad7f5b2bb1862f4548b990049e27e828391ff2bf6f7199998", size = 39427705, upload-time = "2025-07-27T16:31:53.96Z" },
+]
+
+[[package]]
+name = "semantic-version"
+version = "2.10.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7d/31/f2289ce78b9b473d582568c234e104d2a342fd658cc288a7553d83bb8595/semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c", size = 52289, upload-time = "2022-05-26T13:35:23.454Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/23/8146aad7d88f4fcb3a6218f41a60f6c2d4e3a72de72da1825dc7c8f7877c/semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177", size = 15552, upload-time = "2022-05-26T13:35:21.206Z" },
+]
+
+[[package]]
+name = "sentencepiece"
+version = "0.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/15/2e7a025fc62d764b151ae6d0f2a92f8081755ebe8d4a64099accc6f77ba6/sentencepiece-0.2.1.tar.gz", hash = "sha256:8138cec27c2f2282f4a34d9a016e3374cd40e5c6e9cb335063db66a0a3b71fad", size = 3228515, upload-time = "2025-08-12T07:00:51.718Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/af/31/5b7cccb307b485db1a2372d6d2980b0a65d067f8be5ca943a103b4acd5b3/sentencepiece-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e10fa50bdbaa5e2445dbd387979980d391760faf0ec99a09bd7780ff37eaec44", size = 1942557, upload-time = "2025-08-12T06:59:12.379Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/41/0ac923a8e685ad290c5afc8ae55c5844977b8d75076fcc04302b9a324274/sentencepiece-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f27ae6deea72efdb6f361750c92f6c21fd0ad087445082770cc34015213c526", size = 1325384, upload-time = "2025-08-12T06:59:14.334Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/ef/3751555d67daf9003384978f169d31c775cb5c7baf28633caaf1eb2b2b4d/sentencepiece-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60937c959e6f44159fdd9f56fbdd302501f96114a5ba436829496d5f32d8de3f", size = 1253317, upload-time = "2025-08-12T06:59:16.247Z" },
+ { url = "https://files.pythonhosted.org/packages/46/a5/742c69b7bd144eb32b6e5fd50dbd8abbbc7a95fce2fe16e50156fa400e3b/sentencepiece-0.2.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8b1d91545578852f128650b8cce4ec20f93d39b378ff554ebe66290f2dabb92", size = 1316379, upload-time = "2025-08-12T06:59:17.825Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/89/8deeafbba2871e8fa10f20f17447786f4ac38085925335728d360eaf4cae/sentencepiece-0.2.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:27e38eee653abc3d387862e67bc5c8b6f428cd604e688b85d29170b7e725c26c", size = 1387926, upload-time = "2025-08-12T06:59:19.395Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/ca/67fe73005f0ab617c6a970b199754e28e524b6873aa7025224fad3cda252/sentencepiece-0.2.1-cp310-cp310-win32.whl", hash = "sha256:251874d720ac7f28024a168501f3c7bb15d1802245f6e66de565f18bbb9b5eaa", size = 999550, upload-time = "2025-08-12T06:59:20.844Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/33/dc5b54042050d2dda4229c3ce1f862541c99966390b6aa20f54d520d2dc2/sentencepiece-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:e52144670738b4b477fade6c2a9b6af71a8d0094514c9853ac9f6fc1fcfabae7", size = 1054613, upload-time = "2025-08-12T06:59:22.255Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/19/1ea47f46ff97fe04422b78997da1a37cd632f414aae042d27a9009c5b733/sentencepiece-0.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:9076430ac25dfa7147d9d05751dbc66a04bc1aaac371c07f84952979ea59f0d0", size = 1033884, upload-time = "2025-08-12T06:59:24.194Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/15/46afbab00733d81788b64be430ca1b93011bb9388527958e26cc31832de5/sentencepiece-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6356d0986b8b8dc351b943150fcd81a1c6e6e4d439772e8584c64230e58ca987", size = 1942560, upload-time = "2025-08-12T06:59:25.82Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/79/7c01b8ef98a0567e9d84a4e7a910f8e7074fcbf398a5cd76f93f4b9316f9/sentencepiece-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8f8ba89a3acb3dc1ae90f65ec1894b0b9596fdb98ab003ff38e058f898b39bc7", size = 1325385, upload-time = "2025-08-12T06:59:27.722Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/88/2b41e07bd24f33dcf2f18ec3b74247aa4af3526bad8907b8727ea3caba03/sentencepiece-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:02593eca45440ef39247cee8c47322a34bdcc1d8ae83ad28ba5a899a2cf8d79a", size = 1253319, upload-time = "2025-08-12T06:59:29.306Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/54/38a1af0c6210a3c6f95aa46d23d6640636d020fba7135cd0d9a84ada05a7/sentencepiece-0.2.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a0d15781a171d188b661ae4bde1d998c303f6bd8621498c50c671bd45a4798e", size = 1316162, upload-time = "2025-08-12T06:59:30.914Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/66/fb191403ade791ad2c3c1e72fe8413e63781b08cfa3aa4c9dfc536d6e795/sentencepiece-0.2.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f5a3e0d9f445ed9d66c0fec47d4b23d12cfc858b407a03c194c1b26c2ac2a63", size = 1387785, upload-time = "2025-08-12T06:59:32.491Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/2d/3bd9b08e70067b2124518b308db6a84a4f8901cc8a4317e2e4288cdd9b4d/sentencepiece-0.2.1-cp311-cp311-win32.whl", hash = "sha256:6d297a1748d429ba8534eebe5535448d78b8acc32d00a29b49acf28102eeb094", size = 999555, upload-time = "2025-08-12T06:59:34.475Z" },
+ { url = "https://files.pythonhosted.org/packages/32/b8/f709977f5fda195ae1ea24f24e7c581163b6f142b1005bc3d0bbfe4d7082/sentencepiece-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:82d9ead6591015f009cb1be1cb1c015d5e6f04046dbb8c9588b931e869a29728", size = 1054617, upload-time = "2025-08-12T06:59:36.461Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/40/a1fc23be23067da0f703709797b464e8a30a1c78cc8a687120cd58d4d509/sentencepiece-0.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:39f8651bd10974eafb9834ce30d9bcf5b73e1fc798a7f7d2528f9820ca86e119", size = 1033877, upload-time = "2025-08-12T06:59:38.391Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/be/32ce495aa1d0e0c323dcb1ba87096037358edee539cac5baf8755a6bd396/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:57cae326c8727de58c85977b175af132a7138d84c764635d7e71bbee7e774133", size = 1943152, upload-time = "2025-08-12T06:59:40.048Z" },
+ { url = "https://files.pythonhosted.org/packages/88/7e/ff23008899a58678e98c6ff592bf4d368eee5a71af96d0df6b38a039dd4f/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:56dd39a3c4d6493db3cdca7e8cc68c6b633f0d4195495cbadfcf5af8a22d05a6", size = 1325651, upload-time = "2025-08-12T06:59:41.536Z" },
+ { url = "https://files.pythonhosted.org/packages/19/84/42eb3ce4796777a1b5d3699dfd4dca85113e68b637f194a6c8d786f16a04/sentencepiece-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9381351182ff9888cc80e41c632e7e274b106f450de33d67a9e8f6043da6f76", size = 1253645, upload-time = "2025-08-12T06:59:42.903Z" },
+ { url = "https://files.pythonhosted.org/packages/89/fa/d3d5ebcba3cb9e6d3775a096251860c41a6bc53a1b9461151df83fe93255/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99f955df238021bf11f0fc37cdb54fd5e5b5f7fd30ecc3d93fb48b6815437167", size = 1316273, upload-time = "2025-08-12T06:59:44.476Z" },
+ { url = "https://files.pythonhosted.org/packages/04/88/14f2f4a2b922d8b39be45bf63d79e6cd3a9b2f248b2fcb98a69b12af12f5/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cdfecef430d985f1c2bcbfff3defd1d95dae876fbd0173376012d2d7d24044b", size = 1387881, upload-time = "2025-08-12T06:59:46.09Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/b8/903e5ccb77b4ef140605d5d71b4f9e0ad95d456d6184688073ed11712809/sentencepiece-0.2.1-cp312-cp312-win32.whl", hash = "sha256:a483fd29a34c3e34c39ac5556b0a90942bec253d260235729e50976f5dba1068", size = 999540, upload-time = "2025-08-12T06:59:48.023Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/81/92df5673c067148c2545b1bfe49adfd775bcc3a169a047f5a0e6575ddaca/sentencepiece-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4cdc7c36234fda305e85c32949c5211faaf8dd886096c7cea289ddc12a2d02de", size = 1054671, upload-time = "2025-08-12T06:59:49.895Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/02/c5e3bc518655d714622bec87d83db9cdba1cd0619a4a04e2109751c4f47f/sentencepiece-0.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:daeb5e9e9fcad012324807856113708614d534f596d5008638eb9b40112cd9e4", size = 1033923, upload-time = "2025-08-12T06:59:51.952Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/4a/85fbe1706d4d04a7e826b53f327c4b80f849cf1c7b7c5e31a20a97d8f28b/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dcd8161eee7b41aae57ded06272905dbd680a0a04b91edd0f64790c796b2f706", size = 1943150, upload-time = "2025-08-12T06:59:53.588Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/83/4cfb393e287509fc2155480b9d184706ef8d9fa8cbf5505d02a5792bf220/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c6c8f42949f419ff8c7e9960dbadcfbc982d7b5efc2f6748210d3dd53a7de062", size = 1325651, upload-time = "2025-08-12T06:59:55.073Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/de/5a007fb53b1ab0aafc69d11a5a3dd72a289d5a3e78dcf2c3a3d9b14ffe93/sentencepiece-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:097f3394e99456e9e4efba1737c3749d7e23563dd1588ce71a3d007f25475fff", size = 1253641, upload-time = "2025-08-12T06:59:56.562Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/d2/f552be5928105588f4f4d66ee37dd4c61460d8097e62d0e2e0eec41bc61d/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7b670879c370d350557edabadbad1f6561a9e6968126e6debca4029e5547820", size = 1316271, upload-time = "2025-08-12T06:59:58.109Z" },
+ { url = "https://files.pythonhosted.org/packages/96/df/0cfe748ace5485be740fed9476dee7877f109da32ed0d280312c94ec259f/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7f0fd2f2693309e6628aeeb2e2faf6edd221134dfccac3308ca0de01f8dab47", size = 1387882, upload-time = "2025-08-12T07:00:00.701Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/dd/f7774d42a881ced8e1739f393ab1e82ece39fc9abd4779e28050c2e975b5/sentencepiece-0.2.1-cp313-cp313-win32.whl", hash = "sha256:92b3816aa2339355fda2c8c4e021a5de92180b00aaccaf5e2808972e77a4b22f", size = 999541, upload-time = "2025-08-12T07:00:02.709Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/e9/932b9eae6fd7019548321eee1ab8d5e3b3d1294df9d9a0c9ac517c7b636d/sentencepiece-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:10ed3dab2044c47f7a2e7b4969b0c430420cdd45735d78c8f853191fa0e3148b", size = 1054669, upload-time = "2025-08-12T07:00:04.915Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/3a/76488a00ea7d6931689cda28726a1447d66bf1a4837943489314593d5596/sentencepiece-0.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac650534e2251083c5f75dde4ff28896ce7c8904133dc8fef42780f4d5588fcd", size = 1033922, upload-time = "2025-08-12T07:00:06.496Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/b6/08fe2ce819e02ccb0296f4843e3f195764ce9829cbda61b7513f29b95718/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:8dd4b477a7b069648d19363aad0cab9bad2f4e83b2d179be668efa672500dc94", size = 1946052, upload-time = "2025-08-12T07:00:08.136Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/d9/1ea0e740591ff4c6fc2b6eb1d7510d02f3fb885093f19b2f3abd1363b402/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0c0f672da370cc490e4c59d89e12289778310a0e71d176c541e4834759e1ae07", size = 1327408, upload-time = "2025-08-12T07:00:09.572Z" },
+ { url = "https://files.pythonhosted.org/packages/99/7e/1fb26e8a21613f6200e1ab88824d5d203714162cf2883248b517deb500b7/sentencepiece-0.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad8493bea8432dae8d6830365352350f3b4144415a1d09c4c8cb8d30cf3b6c3c", size = 1254857, upload-time = "2025-08-12T07:00:11.021Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/85/c72fd1f3c7a6010544d6ae07f8ddb38b5e2a7e33bd4318f87266c0bbafbf/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b81a24733726e3678d2db63619acc5a8dccd074f7aa7a54ecd5ca33ca6d2d596", size = 1315722, upload-time = "2025-08-12T07:00:12.989Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/e8/661e5bd82a8aa641fd6c1020bd0e890ef73230a2b7215ddf9c8cd8e941c2/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0a81799d0a68d618e89063fb423c3001a034c893069135ffe51fee439ae474d6", size = 1387452, upload-time = "2025-08-12T07:00:15.088Z" },
+ { url = "https://files.pythonhosted.org/packages/99/5e/ae66c361023a470afcbc1fbb8da722c72ea678a2fcd9a18f1a12598c7501/sentencepiece-0.2.1-cp313-cp313t-win32.whl", hash = "sha256:89a3ea015517c42c0341d0d962f3e6aaf2cf10d71b1932d475c44ba48d00aa2b", size = 1002501, upload-time = "2025-08-12T07:00:16.966Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/03/d332828c4ff764e16c1b56c2c8f9a33488bbe796b53fb6b9c4205ddbf167/sentencepiece-0.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:33f068c9382dc2e7c228eedfd8163b52baa86bb92f50d0488bf2b7da7032e484", size = 1057555, upload-time = "2025-08-12T07:00:18.573Z" },
+ { url = "https://files.pythonhosted.org/packages/88/14/5aee0bf0864df9bd82bd59e7711362908e4935e3f9cdc1f57246b5d5c9b9/sentencepiece-0.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:b3616ad246f360e52c85781e47682d31abfb6554c779e42b65333d4b5f44ecc0", size = 1036042, upload-time = "2025-08-12T07:00:20.209Z" },
+ { url = "https://files.pythonhosted.org/packages/24/9c/89eb8b2052f720a612478baf11c8227dcf1dc28cd4ea4c0c19506b5af2a2/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5d0350b686c320068702116276cfb26c066dc7e65cfef173980b11bb4d606719", size = 1943147, upload-time = "2025-08-12T07:00:21.809Z" },
+ { url = "https://files.pythonhosted.org/packages/82/0b/a1432bc87f97c2ace36386ca23e8bd3b91fb40581b5e6148d24b24186419/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c7f54a31cde6fa5cb030370566f68152a742f433f8d2be458463d06c208aef33", size = 1325624, upload-time = "2025-08-12T07:00:23.289Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/99/bbe054ebb5a5039457c590e0a4156ed073fb0fe9ce4f7523404dd5b37463/sentencepiece-0.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c83b85ab2d6576607f31df77ff86f28182be4a8de6d175d2c33ca609925f5da1", size = 1253670, upload-time = "2025-08-12T07:00:24.69Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ad/d5c7075f701bd97971d7c2ac2904f227566f51ef0838dfbdfdccb58cd212/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1855f57db07b51fb51ed6c9c452f570624d2b169b36f0f79ef71a6e6c618cd8b", size = 1316247, upload-time = "2025-08-12T07:00:26.435Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/03/35fbe5f3d9a7435eebd0b473e09584bd3cc354ce118b960445b060d33781/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01e6912125cb45d3792f530a4d38f8e21bf884d6b4d4ade1b2de5cf7a8d2a52b", size = 1387894, upload-time = "2025-08-12T07:00:28.339Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/aa/956ef729aafb6c8f9c443104c9636489093bb5c61d6b90fc27aa1a865574/sentencepiece-0.2.1-cp314-cp314-win32.whl", hash = "sha256:c415c9de1447e0a74ae3fdb2e52f967cb544113a3a5ce3a194df185cbc1f962f", size = 1096698, upload-time = "2025-08-12T07:00:29.764Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/cb/fe400d8836952cc535c81a0ce47dc6875160e5fedb71d2d9ff0e9894c2a6/sentencepiece-0.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:881b2e44b14fc19feade3cbed314be37de639fc415375cefaa5bc81a4be137fd", size = 1155115, upload-time = "2025-08-12T07:00:32.865Z" },
+ { url = "https://files.pythonhosted.org/packages/32/89/047921cf70f36c7b6b6390876b2399b3633ab73b8d0cb857e5a964238941/sentencepiece-0.2.1-cp314-cp314-win_arm64.whl", hash = "sha256:2005242a16d2dc3ac5fe18aa7667549134d37854823df4c4db244752453b78a8", size = 1133890, upload-time = "2025-08-12T07:00:34.763Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/11/5b414b9fae6255b5fb1e22e2ed3dc3a72d3a694e5703910e640ac78346bb/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:a19adcec27c524cb7069a1c741060add95f942d1cbf7ad0d104dffa0a7d28a2b", size = 1946081, upload-time = "2025-08-12T07:00:36.97Z" },
+ { url = "https://files.pythonhosted.org/packages/77/eb/7a5682bb25824db8545f8e5662e7f3e32d72a508fdce086029d89695106b/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e37e4b4c4a11662b5db521def4e44d4d30ae69a1743241412a93ae40fdcab4bb", size = 1327406, upload-time = "2025-08-12T07:00:38.669Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b0/811dae8fb9f2784e138785d481469788f2e0d0c109c5737372454415f55f/sentencepiece-0.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:477c81505db072b3ab627e7eab972ea1025331bd3a92bacbf798df2b75ea86ec", size = 1254846, upload-time = "2025-08-12T07:00:40.611Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/23/195b2e7ec85ebb6a547969f60b723c7aca5a75800ece6cc3f41da872d14e/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:010f025a544ef770bb395091d57cb94deb9652d8972e0d09f71d85d5a0816c8c", size = 1315721, upload-time = "2025-08-12T07:00:42.914Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/aa/553dbe4178b5f23eb28e59393dddd64186178b56b81d9b8d5c3ff1c28395/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:733e59ff1794d26db706cd41fc2d7ca5f6c64a820709cb801dc0ea31780d64ab", size = 1387458, upload-time = "2025-08-12T07:00:44.56Z" },
+ { url = "https://files.pythonhosted.org/packages/66/7c/08ff0012507297a4dd74a5420fdc0eb9e3e80f4e88cab1538d7f28db303d/sentencepiece-0.2.1-cp314-cp314t-win32.whl", hash = "sha256:d3233770f78e637dc8b1fda2cd7c3b99ec77e7505041934188a4e7fe751de3b0", size = 1099765, upload-time = "2025-08-12T07:00:46.058Z" },
+ { url = "https://files.pythonhosted.org/packages/91/d5/2a69e1ce15881beb9ddfc7e3f998322f5cedcd5e4d244cb74dade9441663/sentencepiece-0.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:5e4366c97b68218fd30ea72d70c525e6e78a6c0a88650f57ac4c43c63b234a9d", size = 1157807, upload-time = "2025-08-12T07:00:47.673Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/16/54f611fcfc2d1c46cbe3ec4169780b2cfa7cf63708ef2b71611136db7513/sentencepiece-0.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:105e36e75cbac1292642045458e8da677b2342dcd33df503e640f0b457cb6751", size = 1136264, upload-time = "2025-08-12T07:00:49.485Z" },
+]
+
+[[package]]
+name = "sentry-sdk"
+version = "2.37.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/af/9a/0b2eafc31d5c7551b6bef54ca10d29adea471e0bd16bfe985a9dc4b6633e/sentry_sdk-2.37.0.tar.gz", hash = "sha256:2c661a482dd5accf3df58464f31733545745bb4d5cf8f5e46e0e1c4eed88479f", size = 346203, upload-time = "2025-09-05T11:41:43.848Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/d5/f9f4a2bf5db2ca8f692c46f3821fee1f302f1b76a0e2914aee5390fca565/sentry_sdk-2.37.0-py2.py3-none-any.whl", hash = "sha256:89c1ed205d5c25926558b64a9bed8a5b4fb295b007cecc32c0ec4bf7694da2e1", size = 368304, upload-time = "2025-09-05T11:41:41.286Z" },
+]
+
+[[package]]
+name = "setuptools"
+version = "80.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" },
+]
+
+[[package]]
+name = "shellingham"
+version = "1.5.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
+]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
+]
+
+[[package]]
+name = "smmap"
+version = "5.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" },
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
+]
+
+[[package]]
+name = "spaces"
+version = "0.40.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "gradio" },
+ { name = "httpx" },
+ { name = "packaging" },
+ { name = "psutil" },
+ { name = "pydantic" },
+ { name = "requests" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/01/89/68c673fec1a99d874755f6f046de1d8dba21836200898f29ff82d7f124e2/spaces-0.40.1.tar.gz", hash = "sha256:2dfe5b34ef49d4f7df21bbeb5f4bbc9f94b127aad72934408ca6324f73fe7994", size = 26140, upload-time = "2025-08-22T09:15:07.319Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4c/89/6b717e373549e441e90db55675a6a8b1a9e97ac1b7dfea6741c4ce5349b1/spaces-0.40.1-py3-none-any.whl", hash = "sha256:dc19dda8cd7f7b5005804ccaa163bc801aa6f60f14cd747dc86b0349a17454fe", size = 33620, upload-time = "2025-08-22T09:15:06.287Z" },
+]
+
+[[package]]
+name = "starlette"
+version = "0.47.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" },
+]
+
+[[package]]
+name = "streamlit"
+version = "1.49.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "altair" },
+ { name = "blinker" },
+ { name = "cachetools" },
+ { name = "click" },
+ { name = "gitpython" },
+ { name = "numpy" },
+ { name = "packaging" },
+ { name = "pandas" },
+ { name = "pillow" },
+ { name = "protobuf" },
+ { name = "pyarrow" },
+ { name = "pydeck" },
+ { name = "requests" },
+ { name = "tenacity" },
+ { name = "toml" },
+ { name = "tornado" },
+ { name = "typing-extensions" },
+ { name = "watchdog", marker = "sys_platform != 'darwin'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/24/17/c8024e4ef311dc7833987c603a7d0ebe82f8aa352aaca53b27be3f6b7f01/streamlit-1.49.1.tar.gz", hash = "sha256:6f213f1e43f035143a56f58ad50068d8a09482f0a2dad1050d7e7e99a9689818", size = 9640116, upload-time = "2025-08-29T18:35:45.055Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/85/9e/146cdef515ad07e56c3aa942d087562498592d441aa3bae845ef0cd8fca3/streamlit-1.49.1-py3-none-any.whl", hash = "sha256:ad7b6d0dc35db168587acf96f80378249467fc057ed739a41c511f6bf5aa173b", size = 10044388, upload-time = "2025-08-29T18:35:42.239Z" },
+]
+
+[[package]]
+name = "sympy"
+version = "1.14.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mpmath" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
+]
+
+[[package]]
+name = "tenacity"
+version = "9.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
+]
+
+[[package]]
+name = "tim"
+version = "0.1.0"
+source = { virtual = "." }
+dependencies = [
+ { name = "accelerate" },
+ { name = "bitsandbytes" },
+ { name = "diffusers" },
+ { name = "einops" },
+ { name = "flash-attn" },
+ { name = "gradio" },
+ { name = "imageio" },
+ { name = "imageio-ffmpeg" },
+ { name = "moviepy" },
+ { name = "numpy" },
+ { name = "omegaconf" },
+ { name = "pillow" },
+ { name = "safetensors" },
+ { name = "sentencepiece" },
+ { name = "spaces" },
+ { name = "streamlit" },
+ { name = "timm" },
+ { name = "torch" },
+ { name = "torchdiffeq" },
+ { name = "torchvision" },
+ { name = "transformers" },
+ { name = "triton" },
+ { name = "wandb" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "accelerate", specifier = ">=0.33.0" },
+ { name = "bitsandbytes", specifier = ">=0.47.0" },
+ { name = "diffusers", specifier = "==0.33.1" },
+ { name = "einops", specifier = ">=0.8.1" },
+ { name = "flash-attn", specifier = ">=2.8.3" },
+ { name = "gradio", specifier = ">=5.44.1" },
+ { name = "imageio", specifier = "==2.34.2" },
+ { name = "imageio-ffmpeg", specifier = "==0.5.1" },
+ { name = "moviepy", specifier = "==1.0.3" },
+ { name = "numpy", specifier = "==1.26.0" },
+ { name = "omegaconf", specifier = ">=2.3.0" },
+ { name = "pillow", specifier = "==9.5.0" },
+ { name = "safetensors", specifier = ">=0.6.2" },
+ { name = "sentencepiece", specifier = ">=0.2.0" },
+ { name = "spaces", specifier = ">=0.40.1" },
+ { name = "streamlit", specifier = ">=1.38.0" },
+ { name = "timm", specifier = ">=1.0.19" },
+ { name = "torch", specifier = ">=2.8.0" },
+ { name = "torchdiffeq", specifier = ">=0.2.5" },
+ { name = "torchvision", specifier = ">=0.23.0" },
+ { name = "transformers", specifier = ">=4.44.2" },
+ { name = "triton", specifier = ">=3.4.0" },
+ { name = "wandb", specifier = ">=0.21.3" },
+]
+
+[[package]]
+name = "timm"
+version = "1.0.19"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "huggingface-hub" },
+ { name = "pyyaml" },
+ { name = "safetensors" },
+ { name = "torch" },
+ { name = "torchvision" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1c/78/0789838cf20ba1cc09907914a008c1823d087132b48aa1ccde5e7934175a/timm-1.0.19.tar.gz", hash = "sha256:6e71e1f67ac80c229d3a78ca58347090514c508aeba8f2e2eb5289eda86e9f43", size = 2353261, upload-time = "2025-07-24T03:04:05.281Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/dc/74/661c63260cccf19ed5932e8b3f22f95ecd8bb34b9d9e6af9e1e7b961f254/timm-1.0.19-py3-none-any.whl", hash = "sha256:c07b56c32f3d3226c656f75c1b5479c08eb34eefed927c82fd8751a852f47931", size = 2497950, upload-time = "2025-07-24T03:04:03.097Z" },
+]
+
+[[package]]
+name = "tokenizers"
+version = "0.22.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "huggingface-hub" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5e/b4/c1ce3699e81977da2ace8b16d2badfd42b060e7d33d75c4ccdbf9dc920fa/tokenizers-0.22.0.tar.gz", hash = "sha256:2e33b98525be8453f355927f3cab312c36cd3e44f4d7e9e97da2fa94d0a49dcb", size = 362771, upload-time = "2025-08-29T10:25:33.914Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6d/b1/18c13648edabbe66baa85fe266a478a7931ddc0cd1ba618802eb7b8d9865/tokenizers-0.22.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:eaa9620122a3fb99b943f864af95ed14c8dfc0f47afa3b404ac8c16b3f2bb484", size = 3081954, upload-time = "2025-08-29T10:25:24.993Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/02/c3c454b641bd7c4f79e4464accfae9e7dfc913a777d2e561e168ae060362/tokenizers-0.22.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:71784b9ab5bf0ff3075bceeb198149d2c5e068549c0d18fe32d06ba0deb63f79", size = 2945644, upload-time = "2025-08-29T10:25:23.405Z" },
+ { url = "https://files.pythonhosted.org/packages/55/02/d10185ba2fd8c2d111e124c9d92de398aee0264b35ce433f79fb8472f5d0/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec5b71f668a8076802b0241a42387d48289f25435b86b769ae1837cad4172a17", size = 3254764, upload-time = "2025-08-29T10:25:12.445Z" },
+ { url = "https://files.pythonhosted.org/packages/13/89/17514bd7ef4bf5bfff58e2b131cec0f8d5cea2b1c8ffe1050a2c8de88dbb/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ea8562fa7498850d02a16178105b58803ea825b50dc9094d60549a7ed63654bb", size = 3161654, upload-time = "2025-08-29T10:25:15.493Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/d8/bac9f3a7ef6dcceec206e3857c3b61bb16c6b702ed7ae49585f5bd85c0ef/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4136e1558a9ef2e2f1de1555dcd573e1cbc4a320c1a06c4107a3d46dc8ac6e4b", size = 3511484, upload-time = "2025-08-29T10:25:20.477Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/27/9c9800eb6763683010a4851db4d1802d8cab9cec114c17056eccb4d4a6e0/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf5954de3962a5fd9781dc12048d24a1a6f1f5df038c6e95db328cd22964206", size = 3712829, upload-time = "2025-08-29T10:25:17.154Z" },
+ { url = "https://files.pythonhosted.org/packages/10/e3/b1726dbc1f03f757260fa21752e1921445b5bc350389a8314dd3338836db/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8337ca75d0731fc4860e6204cc24bb36a67d9736142aa06ed320943b50b1e7ed", size = 3408934, upload-time = "2025-08-29T10:25:18.76Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/61/aeab3402c26874b74bb67a7f2c4b569dde29b51032c5384db592e7b216f4/tokenizers-0.22.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a89264e26f63c449d8cded9061adea7b5de53ba2346fc7e87311f7e4117c1cc8", size = 3345585, upload-time = "2025-08-29T10:25:22.08Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/d3/498b4a8a8764cce0900af1add0f176ff24f475d4413d55b760b8cdf00893/tokenizers-0.22.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:790bad50a1b59d4c21592f9c3cf5e5cf9c3c7ce7e1a23a739f13e01fb1be377a", size = 9322986, upload-time = "2025-08-29T10:25:26.607Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/62/92378eb1c2c565837ca3cb5f9569860d132ab9d195d7950c1ea2681dffd0/tokenizers-0.22.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:76cf6757c73a10ef10bf06fa937c0ec7393d90432f543f49adc8cab3fb6f26cb", size = 9276630, upload-time = "2025-08-29T10:25:28.349Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/f0/342d80457aa1cda7654327460f69db0d69405af1e4c453f4dc6ca7c4a76e/tokenizers-0.22.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:1626cb186e143720c62c6c6b5371e62bbc10af60481388c0da89bc903f37ea0c", size = 9547175, upload-time = "2025-08-29T10:25:29.989Z" },
+ { url = "https://files.pythonhosted.org/packages/14/84/8aa9b4adfc4fbd09381e20a5bc6aa27040c9c09caa89988c01544e008d18/tokenizers-0.22.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:da589a61cbfea18ae267723d6b029b84598dc8ca78db9951d8f5beff72d8507c", size = 9692735, upload-time = "2025-08-29T10:25:32.089Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/24/83ee2b1dc76bfe05c3142e7d0ccdfe69f0ad2f1ebf6c726cea7f0874c0d0/tokenizers-0.22.0-cp39-abi3-win32.whl", hash = "sha256:dbf9d6851bddae3e046fedfb166f47743c1c7bd11c640f0691dd35ef0bcad3be", size = 2471915, upload-time = "2025-08-29T10:25:36.411Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/9b/0e0bf82214ee20231845b127aa4a8015936ad5a46779f30865d10e404167/tokenizers-0.22.0-cp39-abi3-win_amd64.whl", hash = "sha256:c78174859eeaee96021f248a56c801e36bfb6bd5b067f2e95aa82445ca324f00", size = 2680494, upload-time = "2025-08-29T10:25:35.14Z" },
+]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" },
+]
+
+[[package]]
+name = "tomlkit"
+version = "0.13.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" },
+]
+
+[[package]]
+name = "torch"
+version = "2.8.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "fsspec" },
+ { name = "jinja2" },
+ { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "networkx", version = "3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "setuptools", marker = "python_full_version >= '3.12'" },
+ { name = "sympy" },
+ { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "typing-extensions" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/63/28/110f7274254f1b8476c561dada127173f994afa2b1ffc044efb773c15650/torch-2.8.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:0be92c08b44009d4131d1ff7a8060d10bafdb7ddcb7359ef8d8c5169007ea905", size = 102052793, upload-time = "2025-08-06T14:53:15.852Z" },
+ { url = "https://files.pythonhosted.org/packages/70/1c/58da560016f81c339ae14ab16c98153d51c941544ae568da3cb5b1ceb572/torch-2.8.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:89aa9ee820bb39d4d72b794345cccef106b574508dd17dbec457949678c76011", size = 888025420, upload-time = "2025-08-06T14:54:18.014Z" },
+ { url = "https://files.pythonhosted.org/packages/70/87/f69752d0dd4ba8218c390f0438130c166fa264a33b7025adb5014b92192c/torch-2.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e8e5bf982e87e2b59d932769938b698858c64cc53753894be25629bdf5cf2f46", size = 241363614, upload-time = "2025-08-06T14:53:31.496Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/d6/e6d4c57e61c2b2175d3aafbfb779926a2cfd7c32eeda7c543925dceec923/torch-2.8.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a3f16a58a9a800f589b26d47ee15aca3acf065546137fc2af039876135f4c760", size = 73611154, upload-time = "2025-08-06T14:53:10.919Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/c4/3e7a3887eba14e815e614db70b3b529112d1513d9dae6f4d43e373360b7f/torch-2.8.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:220a06fd7af8b653c35d359dfe1aaf32f65aa85befa342629f716acb134b9710", size = 102073391, upload-time = "2025-08-06T14:53:20.937Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/63/4fdc45a0304536e75a5e1b1bbfb1b56dd0e2743c48ee83ca729f7ce44162/torch-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c12fa219f51a933d5f80eeb3a7a5d0cbe9168c0a14bbb4055f1979431660879b", size = 888063640, upload-time = "2025-08-06T14:55:05.325Z" },
+ { url = "https://files.pythonhosted.org/packages/84/57/2f64161769610cf6b1c5ed782bd8a780e18a3c9d48931319f2887fa9d0b1/torch-2.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:8c7ef765e27551b2fbfc0f41bcf270e1292d9bf79f8e0724848b1682be6e80aa", size = 241366752, upload-time = "2025-08-06T14:53:38.692Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/5e/05a5c46085d9b97e928f3f037081d3d2b87fb4b4195030fc099aaec5effc/torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:5ae0524688fb6707c57a530c2325e13bb0090b745ba7b4a2cd6a3ce262572916", size = 73621174, upload-time = "2025-08-06T14:53:25.44Z" },
+ { url = "https://files.pythonhosted.org/packages/49/0c/2fd4df0d83a495bb5e54dca4474c4ec5f9c62db185421563deeb5dabf609/torch-2.8.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:e2fab4153768d433f8ed9279c8133a114a034a61e77a3a104dcdf54388838705", size = 101906089, upload-time = "2025-08-06T14:53:52.631Z" },
+ { url = "https://files.pythonhosted.org/packages/99/a8/6acf48d48838fb8fe480597d98a0668c2beb02ee4755cc136de92a0a956f/torch-2.8.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2aca0939fb7e4d842561febbd4ffda67a8e958ff725c1c27e244e85e982173c", size = 887913624, upload-time = "2025-08-06T14:56:44.33Z" },
+ { url = "https://files.pythonhosted.org/packages/af/8a/5c87f08e3abd825c7dfecef5a0f1d9aa5df5dd0e3fd1fa2f490a8e512402/torch-2.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f4ac52f0130275d7517b03a33d2493bab3693c83dcfadf4f81688ea82147d2e", size = 241326087, upload-time = "2025-08-06T14:53:46.503Z" },
+ { url = "https://files.pythonhosted.org/packages/be/66/5c9a321b325aaecb92d4d1855421e3a055abd77903b7dab6575ca07796db/torch-2.8.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:619c2869db3ada2c0105487ba21b5008defcc472d23f8b80ed91ac4a380283b0", size = 73630478, upload-time = "2025-08-06T14:53:57.144Z" },
+ { url = "https://files.pythonhosted.org/packages/10/4e/469ced5a0603245d6a19a556e9053300033f9c5baccf43a3d25ba73e189e/torch-2.8.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2b2f96814e0345f5a5aed9bf9734efa913678ed19caf6dc2cddb7930672d6128", size = 101936856, upload-time = "2025-08-06T14:54:01.526Z" },
+ { url = "https://files.pythonhosted.org/packages/16/82/3948e54c01b2109238357c6f86242e6ecbf0c63a1af46906772902f82057/torch-2.8.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:65616ca8ec6f43245e1f5f296603e33923f4c30f93d65e103d9e50c25b35150b", size = 887922844, upload-time = "2025-08-06T14:55:50.78Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/54/941ea0a860f2717d86a811adf0c2cd01b3983bdd460d0803053c4e0b8649/torch-2.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:659df54119ae03e83a800addc125856effda88b016dfc54d9f65215c3975be16", size = 241330968, upload-time = "2025-08-06T14:54:45.293Z" },
+ { url = "https://files.pythonhosted.org/packages/de/69/8b7b13bba430f5e21d77708b616f767683629fc4f8037564a177d20f90ed/torch-2.8.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:1a62a1ec4b0498930e2543535cf70b1bef8c777713de7ceb84cd79115f553767", size = 73915128, upload-time = "2025-08-06T14:54:34.769Z" },
+ { url = "https://files.pythonhosted.org/packages/15/0e/8a800e093b7f7430dbaefa80075aee9158ec22e4c4fc3c1a66e4fb96cb4f/torch-2.8.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:83c13411a26fac3d101fe8035a6b0476ae606deb8688e904e796a3534c197def", size = 102020139, upload-time = "2025-08-06T14:54:39.047Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/15/5e488ca0bc6162c86a33b58642bc577c84ded17c7b72d97e49b5833e2d73/torch-2.8.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:8f0a9d617a66509ded240add3754e462430a6c1fc5589f86c17b433dd808f97a", size = 887990692, upload-time = "2025-08-06T14:56:18.286Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/a8/6a04e4b54472fc5dba7ca2341ab219e529f3c07b6941059fbf18dccac31f/torch-2.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a7242b86f42be98ac674b88a4988643b9bc6145437ec8f048fea23f72feb5eca", size = 241603453, upload-time = "2025-08-06T14:55:22.945Z" },
+ { url = "https://files.pythonhosted.org/packages/04/6e/650bb7f28f771af0cb791b02348db8b7f5f64f40f6829ee82aa6ce99aabe/torch-2.8.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:7b677e17f5a3e69fdef7eb3b9da72622f8d322692930297e4ccb52fefc6c8211", size = 73632395, upload-time = "2025-08-06T14:55:28.645Z" },
+]
+
+[[package]]
+name = "torchdiffeq"
+version = "0.2.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "scipy", version = "1.16.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "torch" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/87/ec/a40aa124660f0ee65e6760cb53df6a82ad91a1a3ef1da5e747f1336644dd/torchdiffeq-0.2.5.tar.gz", hash = "sha256:b50d3760d13fd138dcceac651f4b80396f44fefcebd037a033fecfeaa9cc12e7", size = 31197, upload-time = "2024-11-21T20:20:11.552Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b9/35/537f64f2d0b3cfebaae0f903b4e3a3b239abcc99d0f73cb15b9cee9b8212/torchdiffeq-0.2.5-py3-none-any.whl", hash = "sha256:aa1db4bed13bd04952f28a53cdf4336d1ab60417c1d9698d7a239fec1cf2bcf8", size = 32902, upload-time = "2024-11-21T20:20:09.938Z" },
+]
+
+[[package]]
+name = "torchvision"
+version = "0.23.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy" },
+ { name = "pillow" },
+ { name = "torch" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/49/5ad5c3ff4920be0adee9eb4339b4fb3b023a0fc55b9ed8dbc73df92946b8/torchvision-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7266871daca00ad46d1c073e55d972179d12a58fa5c9adec9a3db9bbed71284a", size = 1856885, upload-time = "2025-08-06T14:57:55.024Z" },
+ { url = "https://files.pythonhosted.org/packages/25/44/ddd56d1637bac42a8c5da2c8c440d8a28c431f996dd9790f32dd9a96ca6e/torchvision-0.23.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:31c583ba27426a3a04eca8c05450524105c1564db41be6632f7536ef405a6de2", size = 2394251, upload-time = "2025-08-06T14:58:01.725Z" },
+ { url = "https://files.pythonhosted.org/packages/93/f3/3cdf55bbf0f737304d997561c34ab0176222e0496b6743b0feab5995182c/torchvision-0.23.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3932bf67256f2d095ce90a9f826f6033694c818856f4bb26794cf2ce64253e53", size = 8627497, upload-time = "2025-08-06T14:58:09.317Z" },
+ { url = "https://files.pythonhosted.org/packages/97/90/02afe57c3ef4284c5cf89d3b7ae203829b3a981f72b93a7dd2a3fd2c83c1/torchvision-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:83ee5bf827d61a8af14620c0a61d8608558638ac9c3bac8adb7b27138e2147d1", size = 1600760, upload-time = "2025-08-06T14:57:56.783Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/d7/15d3d7bd8d0239211b21673d1bac7bc345a4ad904a8e25bb3fd8a9cf1fbc/torchvision-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49aa20e21f0c2bd458c71d7b449776cbd5f16693dd5807195a820612b8a229b7", size = 1856884, upload-time = "2025-08-06T14:58:00.237Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/14/7b44fe766b7d11e064c539d92a172fa9689a53b69029e24f2f1f51e7dc56/torchvision-0.23.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01dc33ee24c79148aee7cdbcf34ae8a3c9da1674a591e781577b716d233b1fa6", size = 2395543, upload-time = "2025-08-06T14:58:04.373Z" },
+ { url = "https://files.pythonhosted.org/packages/79/9c/fcb09aff941c8147d9e6aa6c8f67412a05622b0c750bcf796be4c85a58d4/torchvision-0.23.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:35c27941831b653f5101edfe62c03d196c13f32139310519e8228f35eae0e96a", size = 8628388, upload-time = "2025-08-06T14:58:07.802Z" },
+ { url = "https://files.pythonhosted.org/packages/93/40/3415d890eb357b25a8e0a215d32365a88ecc75a283f75c4e919024b22d97/torchvision-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:09bfde260e7963a15b80c9e442faa9f021c7e7f877ac0a36ca6561b367185013", size = 1600741, upload-time = "2025-08-06T14:57:59.158Z" },
+ { url = "https://files.pythonhosted.org/packages/df/1d/0ea0b34bde92a86d42620f29baa6dcbb5c2fc85990316df5cb8f7abb8ea2/torchvision-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e0e2c04a91403e8dd3af9756c6a024a1d9c0ed9c0d592a8314ded8f4fe30d440", size = 1856885, upload-time = "2025-08-06T14:58:06.503Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/00/2f6454decc0cd67158c7890364e446aad4b91797087a57a78e72e1a8f8bc/torchvision-0.23.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6dd7c4d329a0e03157803031bc856220c6155ef08c26d4f5bbac938acecf0948", size = 2396614, upload-time = "2025-08-06T14:58:03.116Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/b5/3e580dcbc16f39a324f3dd71b90edbf02a42548ad44d2b4893cc92b1194b/torchvision-0.23.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4e7d31c43bc7cbecbb1a5652ac0106b436aa66e26437585fc2c4b2cf04d6014c", size = 8627108, upload-time = "2025-08-06T14:58:12.956Z" },
+ { url = "https://files.pythonhosted.org/packages/82/c1/c2fe6d61e110a8d0de2f94276899a2324a8f1e6aee559eb6b4629ab27466/torchvision-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:a2e45272abe7b8bf0d06c405e78521b5757be1bd0ed7e5cd78120f7fdd4cbf35", size = 1600723, upload-time = "2025-08-06T14:57:57.986Z" },
+ { url = "https://files.pythonhosted.org/packages/91/37/45a5b9407a7900f71d61b2b2f62db4b7c632debca397f205fdcacb502780/torchvision-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1c37e325e09a184b730c3ef51424f383ec5745378dc0eca244520aca29722600", size = 1856886, upload-time = "2025-08-06T14:58:05.491Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/da/a06c60fc84fc849377cf035d3b3e9a1c896d52dbad493b963c0f1cdd74d0/torchvision-0.23.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2f7fd6c15f3697e80627b77934f77705f3bc0e98278b989b2655de01f6903e1d", size = 2353112, upload-time = "2025-08-06T14:58:26.265Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/27/5ce65ba5c9d3b7d2ccdd79892ab86a2f87ac2ca6638f04bb0280321f1a9c/torchvision-0.23.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:a76fafe113b2977be3a21bf78f115438c1f88631d7a87203acb3dd6ae55889e6", size = 8627658, upload-time = "2025-08-06T14:58:15.999Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/e4/028a27b60aa578a2fa99d9d7334ff1871bb17008693ea055a2fdee96da0d/torchvision-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:07d069cb29691ff566e3b7f11f20d91044f079e1dbdc9d72e0655899a9b06938", size = 1600749, upload-time = "2025-08-06T14:58:10.719Z" },
+ { url = "https://files.pythonhosted.org/packages/05/35/72f91ad9ac7c19a849dedf083d347dc1123f0adeb401f53974f84f1d04c8/torchvision-0.23.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:2df618e1143805a7673aaf82cb5720dd9112d4e771983156aaf2ffff692eebf9", size = 2047192, upload-time = "2025-08-06T14:58:11.813Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/9d/406cea60a9eb9882145bcd62a184ee61e823e8e1d550cdc3c3ea866a9445/torchvision-0.23.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:2a3299d2b1d5a7aed2d3b6ffb69c672ca8830671967eb1cee1497bacd82fe47b", size = 2359295, upload-time = "2025-08-06T14:58:17.469Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/f4/34662f71a70fa1e59de99772142f22257ca750de05ccb400b8d2e3809c1d/torchvision-0.23.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:76bc4c0b63d5114aa81281390f8472a12a6a35ce9906e67ea6044e5af4cab60c", size = 8800474, upload-time = "2025-08-06T14:58:22.53Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/f5/b5a2d841a8d228b5dbda6d524704408e19e7ca6b7bb0f24490e081da1fa1/torchvision-0.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b9e2dabf0da9c8aa9ea241afb63a8f3e98489e706b22ac3f30416a1be377153b", size = 1527667, upload-time = "2025-08-06T14:58:14.446Z" },
+]
+
+[[package]]
+name = "tornado"
+version = "6.5.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = "2025-08-08T18:26:42.945Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" },
+ { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" },
+ { url = "https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 443954, upload-time = "2025-08-08T18:26:55.072Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" },
+]
+
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
+]
+
+[[package]]
+name = "transformers"
+version = "4.56.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "huggingface-hub" },
+ { name = "numpy" },
+ { name = "packaging" },
+ { name = "pyyaml" },
+ { name = "regex" },
+ { name = "requests" },
+ { name = "safetensors" },
+ { name = "tokenizers" },
+ { name = "tqdm" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/89/21/dc88ef3da1e49af07ed69386a11047a31dcf1aaf4ded3bc4b173fbf94116/transformers-4.56.1.tar.gz", hash = "sha256:0d88b1089a563996fc5f2c34502f10516cad3ea1aa89f179f522b54c8311fe74", size = 9855473, upload-time = "2025-09-04T20:47:13.14Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/71/7c/283c3dd35e00e22a7803a0b2a65251347b745474a82399be058bde1c9f15/transformers-4.56.1-py3-none-any.whl", hash = "sha256:1697af6addfb6ddbce9618b763f4b52d5a756f6da4899ffd1b4febf58b779248", size = 11608197, upload-time = "2025-09-04T20:47:04.895Z" },
+]
+
+[[package]]
+name = "triton"
+version = "3.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "setuptools" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/ee/0ee5f64a87eeda19bbad9bc54ae5ca5b98186ed00055281fd40fb4beb10e/triton-3.4.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ff2785de9bc02f500e085420273bb5cc9c9bb767584a4aa28d6e360cec70128", size = 155430069, upload-time = "2025-07-30T19:58:21.715Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/39/43325b3b651d50187e591eefa22e236b2981afcebaefd4f2fc0ea99df191/triton-3.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b70f5e6a41e52e48cfc087436c8a28c17ff98db369447bcaff3b887a3ab4467", size = 155531138, upload-time = "2025-07-30T19:58:29.908Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/66/b1eb52839f563623d185f0927eb3530ee4d5ffe9d377cdaf5346b306689e/triton-3.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:31c1d84a5c0ec2c0f8e8a072d7fd150cab84a9c239eaddc6706c081bfae4eb04", size = 155560068, upload-time = "2025-07-30T19:58:37.081Z" },
+ { url = "https://files.pythonhosted.org/packages/30/7b/0a685684ed5322d2af0bddefed7906674f67974aa88b0fae6e82e3b766f6/triton-3.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00be2964616f4c619193cb0d1b29a99bd4b001d7dc333816073f92cf2a8ccdeb", size = 155569223, upload-time = "2025-07-30T19:58:44.017Z" },
+ { url = "https://files.pythonhosted.org/packages/20/63/8cb444ad5cdb25d999b7d647abac25af0ee37d292afc009940c05b82dda0/triton-3.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7936b18a3499ed62059414d7df563e6c163c5e16c3773678a3ee3d417865035d", size = 155659780, upload-time = "2025-07-30T19:58:51.171Z" },
+]
+
+[[package]]
+name = "typer"
+version = "0.17.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "rich" },
+ { name = "shellingham" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/92/e8/2a73ccf9874ec4c7638f172efc8972ceab13a0e3480b389d6ed822f7a822/typer-0.17.4.tar.gz", hash = "sha256:b77dc07d849312fd2bb5e7f20a7af8985c7ec360c45b051ed5412f64d8dc1580", size = 103734, upload-time = "2025-09-05T18:14:40.746Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/93/72/6b3e70d32e89a5cbb6a4513726c1ae8762165b027af569289e19ec08edd8/typer-0.17.4-py3-none-any.whl", hash = "sha256:015534a6edaa450e7007eba705d5c18c3349dcea50a6ad79a5ed530967575824", size = 46643, upload-time = "2025-09-05T18:14:39.166Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.15.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
+]
+
+[[package]]
+name = "typing-inspection"
+version = "0.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
+]
+
+[[package]]
+name = "tzdata"
+version = "2025.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.5.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
+]
+
+[[package]]
+name = "uvicorn"
+version = "0.35.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "h11" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" },
+]
+
+[[package]]
+name = "wandb"
+version = "0.21.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "gitpython" },
+ { name = "packaging" },
+ { name = "platformdirs" },
+ { name = "protobuf" },
+ { name = "pydantic" },
+ { name = "pyyaml" },
+ { name = "requests" },
+ { name = "sentry-sdk" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/2f/84/af6ccdf95e56f15aceb360e437fbfcca3dc91ad8ca335fe482083e29f7a5/wandb-0.21.3.tar.gz", hash = "sha256:031e24e2aad0ce735dfdcc74baf2f2c12c106f500ed24798de6ef9b9e63bb432", size = 40146972, upload-time = "2025-08-30T18:21:55.138Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/aa/e8/b5bfbbc7f76c11fd0665b92be8a38c6a83b27f353552233b9959b21be488/wandb-0.21.3-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:f85bac45b4482742ec9ff190af38eb00a877ddeb4875475e7e487dc19300ff03", size = 18820209, upload-time = "2025-08-30T18:21:33.47Z" },
+ { url = "https://files.pythonhosted.org/packages/59/a3/03f0fcde49609df1cb3a382fb5053f601b88da448bcd415ed7f75272eee7/wandb-0.21.3-py3-none-macosx_12_0_arm64.whl", hash = "sha256:8a2b3ba419b91d47edead2755f04cef54f9e3c4496ee0c9854c3cfeff4216dd3", size = 18310636, upload-time = "2025-08-30T18:21:37.405Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/c3/d6048db30ff2e3c67089ba0e94878572fd26137b146f8e3b27bbdf428b31/wandb-0.21.3-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:35a1972881f3b85755befab004118234593792a9f05e07fd6345780172f4420e", size = 19053277, upload-time = "2025-08-30T18:21:39.389Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/7f/805c3d2fa9e3b8b6bf2bc534887c9ed97bdf22007ca8ba59424a1c8bb360/wandb-0.21.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d9cf8588cb090a2a41f589037fda72c57c9e23edfbd2ad829e575f1305d942c", size = 18130850, upload-time = "2025-08-30T18:21:41.573Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/af/a3252e5afac98a036f83c65ec92cadf6677ccdaacbbb2151da29f694d136/wandb-0.21.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff24b6b8e0f9da840b6bd5c7f60b0a5507bd998db40c9c2d476f9a340bec8ed", size = 19570305, upload-time = "2025-08-30T18:21:43.811Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/f9/4404b5a24bfd4ba027c19d30152b0fc7ebca8c49b202dee6ecb7f316082c/wandb-0.21.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4975dec19e2b343e23ed6e60f7e1290120553719f82e87a22205bede758416ad", size = 18135806, upload-time = "2025-08-30T18:21:46.211Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/32/9580f42899e54f3d0b4ea619b6f6a54980a4e36fd0675d58c09f0a08d3f6/wandb-0.21.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:514a0aad40ecc0bdb757b1dc86e4ac98f61d2d760445b6e1f555291562320f2d", size = 19646760, upload-time = "2025-08-30T18:21:48.768Z" },
+ { url = "https://files.pythonhosted.org/packages/75/d3/faa6ddb792a158c154fb704b25c96d0478e71eabf96e3f17529fb23b6894/wandb-0.21.3-py3-none-win32.whl", hash = "sha256:45aa3d8ad53c6ee06f37490d7a329ed7d0f5ca4dbd5d05bb0c01d5da22f14691", size = 18709408, upload-time = "2025-08-30T18:21:50.859Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/2d/7ef56e25f78786e59fefd9b19867c325f9686317d9f7b93b5cb340360a3e/wandb-0.21.3-py3-none-win_amd64.whl", hash = "sha256:56d5a5697766f552a9933d8c6a564202194768eb0389bd5f9fe9a99cd4cee41e", size = 18709411, upload-time = "2025-08-30T18:21:52.874Z" },
+]
+
+[[package]]
+name = "watchdog"
+version = "6.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" },
+ { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" },
+ { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" },
+ { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" },
+ { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
+]
+
+[[package]]
+name = "websockets"
+version = "15.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" },
+ { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" },
+ { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" },
+ { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" },
+ { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" },
+ { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" },
+ { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" },
+ { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" },
+ { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" },
+ { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" },
+ { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" },
+ { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" },
+ { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" },
+ { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" },
+ { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" },
+ { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" },
+ { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" },
+ { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" },
+ { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" },
+ { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 174599, upload-time = "2025-03-05T20:03:21.1Z" },
+ { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" },
+ { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" },
+]
+
+[[package]]
+name = "zipp"
+version = "3.23.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" },
+]