PLISM dataset

The Pathology Images of Scanners and Mobilephones (PLISM) dataset was created by Ochi et al. (2024) to evaluate the robustness of AI models to inter-institutional domain shifts. All histopathological specimens used to create the PLISM dataset were sourced from patients who were diagnosed and underwent surgery at the University of Tokyo Hospital between 1955 and 2018.

PLISM-wsi consists of a group of consecutive slides digitized under 7 different scanners and stained across 13 H&E conditions, for a total of 91 (7 × 13) samples. Each sample encompasses the same biological information, namely a collection of 46 tissue microarrays (TMAs) from various organs. Additional details can be found at https://p024eb.github.io/ and in the original publication.


Figure 1: Tissue types included in TMA specimens of the PLISM-wsi dataset. Source: https://p024eb.github.io/ (Ochi et al., 2024)


Figure 2: Digitization and staining workflow for the PLISM dataset. Source: https://p024eb.github.io/ (Ochi et al., 2024)

PLISM dataset tiles

The original PLISM-wsi subset contains a total of 310,947 images. Registration was performed across all scanners and staining conditions using OpenCV's AKAZE (Alcantarilla et al., 2013) key-point matching algorithm. This produced 3,417 aligned image groups, i.e. 310,947 (3,417 groups × 91 WSIs) image patches of shape 512×512 at a resolution ranging from 0.22 to 0.26 µm/pixel (40× magnification).
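For illustration, here is a minimal sketch of AKAZE key-point matching with OpenCV; this is not the authors' exact registration pipeline, and the input file names are hypothetical.

import cv2

# Load two (hypothetical) grayscale patches of the same tissue region
img_ref = cv2.imread("reference_patch.png", cv2.IMREAD_GRAYSCALE)
img_mov = cv2.imread("moving_patch.png", cv2.IMREAD_GRAYSCALE)

# Detect AKAZE key points and compute their binary descriptors
akaze = cv2.AKAZE_create()
kpts_ref, desc_ref = akaze.detectAndCompute(img_ref, None)
kpts_mov, desc_mov = akaze.detectAndCompute(img_mov, None)

# Brute-force Hamming matching with Lowe's ratio test
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = matcher.knnMatch(desc_ref, desc_mov, k=2)
good = [p[0] for p in matches if len(p) == 2 and p[0].distance < 0.8 * p[1].distance]
# `good` matches can then feed e.g. cv2.findHomography to estimate the alignment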

In the spirit of this unique and outstanding contribution, we generated an extended version of the original tiles dataset provided by Ochi et al. (2024), so as to ease its adoption across the digital pathology community and to serve as a reference dataset for benchmarking the robustness of foundation models to staining and scanner variations. In particular, our work differs from the original dataset in the following aspects:

• The original, non-registered WSIs were registered using Elastix (Klein et al., 2010; Shamonin et al., 2014); a minimal registration sketch is shown after this list. The reference slide was stained with the GMH condition and digitized using the Hamamatsu NanoZoomer S60 scanner.

• Tiles of 224×224 pixels were extracted at a resolution of 0.5 µm/pixel (20× magnification) using an in-house bidirectional U-Net (Ronneberger et al., 2015).

• All tiles were extracted from each registered WSI, resulting in 16,278 tiles for each of the 91 WSIs.
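As mentioned in the first bullet, registration relied on Elastix. Below is a minimal sketch using the itk-elastix Python wrapper with a default rigid parameter map; this is an illustrative assumption, not the actual configuration used for this dataset, and the thumbnail file names are hypothetical.

import itk

# Hypothetical downsampled thumbnails: reference (GMH stain, S60 scanner) and a slide to align
fixed = itk.imread("GMH_S60_thumbnail.png", itk.F)
moving = itk.imread("GIVH_AT2_thumbnail.png", itk.F)

# Default rigid parameter map (illustrative choice, not the dataset's actual settings)
parameters = itk.ParameterObject.New()
parameters.AddParameterMap(parameters.GetDefaultParameterMap("rigid"))

# Run intensity-based registration and save the aligned image
registered, transform_parameters = itk.elastix_registration_method(
    fixed, moving, parameter_object=parameters
)
itk.imwrite(registered, "GIVH_AT2_to_GMH_S60_thumbnail.png")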

In total, our dataset encompasses 1,481,298 histology tiles (91 × 16,278) for a total size of 150 GB.

For each tile, we provide the original slide id (slide_id), tile id (tile_id), stainer and scanner.
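As a quick sanity check, the sketch below streams the first record and prints those metadata fields; it assumes the datasets library is installed and that you are logged in to the Hugging Face Hub.

import datasets

# Stream the dataset so no file is downloaded up front
dataset = datasets.load_dataset("owkin/plism-dataset-tiles", split="train", streaming=True)
first_record = next(iter(dataset))
for field in ("slide_id", "tile_id", "stainer", "scanner"):
    print(field, "->", first_record[field])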

How to extract features

The following code snippet allows you to extract features with your own feature extractor. 91 folders will be created, each named after a slide_id and containing a features.npy file. This file is a numpy array of shape (16278, 3 + d), where d is the output dimension of your model and the first 3 columns correspond to (deepzoom_level, x_coordinate, y_coordinate).

Tile coordinates are in the same order for each slide inside the dataset. No additional sorting is required to compare feature matrices between different slides (the first row of each matrix corresponds to the same tile location, and so on).
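Because rows are aligned across slides, a per-tile robustness metric can be computed directly from two feature matrices. Below is a minimal sketch assuming features were already exported with the snippet further down; the export path and the second slide_id are hypothetical.

import numpy as np
from pathlib import Path

export_dir = Path("/path/to/your/export/directory/")  # hypothetical export path
features_a = np.load(export_dir / "GIVH_AT2_to_GMH_S60" / "features.npy")
features_b = np.load(export_dir / "GMH_S54_to_GMH_S60" / "features.npy")  # hypothetical slide_id

# The first 3 columns are (deepzoom_level, x, y) and must match row by row
assert np.array_equal(features_a[:, :3], features_b[:, :3])

# Mean cosine similarity between matched tiles, one possible robustness metric
emb_a, emb_b = features_a[:, 3:], features_b[:, 3:]
cosine = (emb_a * emb_b).sum(axis=1) / (
    np.linalg.norm(emb_a, axis=1) * np.linalg.norm(emb_b, axis=1)
)
print(f"Mean cosine similarity: {cosine.mean():.4f}")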

Around 2.5 hours and roughly 10 GB of storage are necessary to extract all features with a ViT-B model, 16 CPUs and 1 NVIDIA T4 GPU (16 GB).

🎉 We plan to release a dedicated GitHub repository to properly extract features and compute metrics, as done in (Filiot et al., 2025).


# Generic libraries
from __future__ import annotations
from math import ceil
from pathlib import Path

from PIL import Image
from loguru import logger
from tqdm import tqdm

# Tensor-related libraries
import numpy as np
import torch
from torch.utils.data import DataLoader
import datasets

# You first need to login with your HF token
#from huggingface_hub import login
#login()

# Set your PIL.Image transform and embedding model
#transform = # torchvision transform converting a PIL Image into a Tensor
#model = # torch.nn.Module outputting a tensor of features of shape (batch_size, features_dimension)

# You can tweak the batch size depending on your hardware or model
batch_size = 32
num_slides = 91
num_tiles = 16278
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Set your export directory
export_dir = Path("/path/to/your/export/directory/")


def collate(batch: list[dict[str, str | Image.Image]]) -> tuple[list[str], list[str], torch.Tensor]:
    """Return slide ids, tile ids and transformed images."""
    slide_ids = [b["slide_id"] for b in batch]
    tile_ids = [b["tile_id"] for b in batch]
    imgs = torch.stack([transform(b["png"]) for b in batch], dim=0)
    return (slide_ids, tile_ids, imgs)


def process_imgs(imgs: torch.Tensor, tile_ids: list[str]) -> np.ndarray:
    """Perform inference on input (already transformed) images."""
    with torch.inference_mode():
        batch_features = model(imgs.to(device)).cpu().numpy() # (N_tiles, d) numpy array
    # Parse (deepzoom_level, x, y) from tile ids such as "tile_16_100_127"
    batch_tiles_coordinates = np.array([tile_id.split("_")[1:] for tile_id in tile_ids]).astype(int) # (N_tiles, 3) numpy array
    batch_stack = np.concatenate([batch_tiles_coordinates, batch_features], axis=1)
    return batch_stack

def save_features(slide_features: list[np.ndarray], slide_id: str):
    """Save features to disk."""
    slide_features_export_dir = export_dir / slide_id
    slide_features_export_path = slide_features_export_dir / "features.npy"
    slide_features_export_dir.mkdir(exist_ok=True, parents=True)
    output_slide_features = np.concatenate(slide_features, axis=0).astype(np.float32)
    slide_num_tiles = output_slide_features.shape[0]
    assert slide_num_tiles == num_tiles, f"Output features for slide {slide_id} contain {slide_num_tiles} tiles instead of {num_tiles}."
    np.save(slide_features_export_path, output_slide_features)
    logger.success(f"Successfully saved features for slide: {slide_id}")

# Create the dataset and dataloader without actually loading the files to disk (`streaming=True`)
# The dataset is sorted by slide_id, meaning that the first 16278 indexes belong to the same first slide,
# then 16278:32556 to the second slide, etc.
dataset = datasets.load_dataset("owkin/plism-dataset-tiles", split="train", streaming=True)
dataloader = DataLoader(
    dataset, batch_size=batch_size, collate_fn=collate, num_workers=0, pin_memory=True, shuffle=False
)

# Iterate over the full dataset and store features each time 16278 input images have been processed
slide_features = []
current_num_tiles = 0

for (slide_ids, tile_ids, imgs) in tqdm(
    dataloader,
    total=ceil(num_slides * num_tiles / batch_size),
    desc="Extracting features"
):
    reference_slide_id = slide_ids[0]

    # If we're on the same slide, we just add the batch features to the running list
    if all(slide_id == reference_slide_id for slide_id in slide_ids):
        batch_stack = process_imgs(imgs, tile_ids)
        slide_features.append(batch_stack)
        # For the very last slide, the last batch may be of size < `batch_size`
        current_num_tiles += batch_stack.shape[0]
        # If the current batch contains exactly the last `batch_size` tile features for the slide,
        # export the slide features and reset `slide_features` and `current_num_tiles`
        if current_num_tiles == num_tiles:
            save_features(slide_features, slide_id=reference_slide_id)
            slide_features = []
            current_num_tiles = 0
    # The current batch contains tiles from both slide N (`reference_slide_id`) and slide N+1
    else:
        # Retrieve the first index at which a tile comes from slide N+1
        mask = (np.array(slide_ids) != reference_slide_id)
        idx = mask.argmax()
        # Only process the tiles belonging to slide N, then export the slide features
        batch_stack = process_imgs(imgs[:idx], tile_ids[:idx])
        slide_features.append(batch_stack)
        save_features(slide_features, slide_id=reference_slide_id)
        # Re-initialize `slide_features` and `current_num_tiles` with
        # the tile features already processed for slide N+1
        slide_features = [process_imgs(imgs[idx:], tile_ids[idx:])]
        current_num_tiles = len(slide_ids) - idx

License

This dataset is licensed under the CC BY 4.0 license.

Acknowledgments

We thank the PLISM dataset's authors for their unique contribution.

Third-party licenses

How to cite

If you use this dataset, please cite the original article (Ochi et al., 2024) and our work as follows:

APA style

Filiot, A., Dop, N., Tchita, O., Riou, A., Peeters, T., Valter, D., Scalbert, M., Saillard, C., Robin, G., & Olivier, A. (2025). Distilling foundation models for robust and efficient models in digital pathology. arXiv. https://arxiv.org/abs/2501.16239

BibTeX entry

@misc{filiot2025distillingfoundationmodelsrobust,
      title={Distilling foundation models for robust and efficient models in digital pathology}, 
      author={Alexandre Filiot and Nicolas Dop and Oussama Tchita and Auriane Riou and Thomas Peeters and Daria Valter and Marin Scalbert and Charlie Saillard and Geneviève Robin and Antoine Olivier},
      year={2025},
      eprint={2501.16239},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2501.16239}, 
}

References

  • (Ochi et al., 2024) Ochi, M., Komura, D., Onoyama, T. et al. Registered multi-device/staining histology image dataset for domain-agnostic machine learning models. Sci Data 11, 330 (2024).

  • (Alcantarilla et al., 2013) Alcantarilla, P., Nuevo, J., & Bartoli, A. (2013). Fast explicit diffusion for accelerated features in nonlinear scale spaces. In Proceedings of the British Machine Vision Conference, 13.1–13.11. British Machine Vision Association.

  • (Ronneberger et al., 2015) Ronneberger, O., Fischer, P., & Brox, T. (2015). U-Net: Convolutional networks for biomedical image segmentation. arXiv. https://arxiv.org/abs/1505.04597

  • (Klein et al., 2010) Klein, S., Staring, M., Murphy, K., Viergever, M. A., & Pluim, J. P. W. (2010). Elastix: A toolbox for intensity-based medical image registration. IEEE Transactions on Medical Imaging, 29(1), 196–205.

  • (Shamonin et al., 2014) Shamonin, D. P., Bron, E. E., Lelieveldt, B. P. F., Smits, M., Klein, S., & Staring, M. (2014). Fast parallel image registration on CPU and GPU for diagnostic classification of Alzheimer's disease. Frontiers in Neuroinformatics, 7, 50.

  • (Filiot et al., 2025) Filiot, A., Dop, N., Tchita, O., Riou, A., Peeters, T., Valter, D., Scalbert, M., Saillard, C., Robin, G., & Olivier, A. (2025). Distilling foundation models for robust and efficient models in digital pathology. arXiv. https://arxiv.org/abs/2501.16239
