Add code for benchmarking
- benchmarks/.gitignore +1 -0
- benchmarks/Dockerfile +22 -0
- benchmarks/lint.sh +2 -0
- benchmarks/models/README.md +124 -0
- benchmarks/models/bpr_als/main.py +301 -0
- benchmarks/models/itemknn/README.md +21 -0
- benchmarks/models/itemknn/main.py +253 -0
- benchmarks/models/popularity/main.py +183 -0
- benchmarks/models/random_rec/main.py +128 -0
- benchmarks/models/sansa/README.md +9 -0
- benchmarks/models/sansa/main.py +284 -0
- benchmarks/models/sasrec/data.py +194 -0
- benchmarks/models/sasrec/eval.py +127 -0
- benchmarks/models/sasrec/model.py +270 -0
- benchmarks/models/sasrec/train.py +150 -0
- benchmarks/pyproject.toml +44 -0
- benchmarks/scripts/get_dataset_stats.py +210 -0
- benchmarks/scripts/make_multievent.py +88 -0
- benchmarks/scripts/transform2sequential.py +76 -0
- benchmarks/tests/test_timesplit.py +50 -0
- benchmarks/yambda/__init__.py +0 -0
- benchmarks/yambda/constants.py +29 -0
- benchmarks/yambda/evaluation/__init__.py +0 -0
- benchmarks/yambda/evaluation/metrics.py +204 -0
- benchmarks/yambda/evaluation/ranking.py +145 -0
- benchmarks/yambda/processing/__init__.py +0 -0
- benchmarks/yambda/processing/chunk_read.py +79 -0
- benchmarks/yambda/processing/timesplit.py +214 -0
- benchmarks/yambda/utils.py +42 -0
benchmarks/.gitignore
ADDED
@@ -0,0 +1 @@
__pycache__/
benchmarks/Dockerfile
ADDED
@@ -0,0 +1,22 @@
# cuda11.8 due to implicit
FROM pytorch/pytorch:2.7.0-cuda11.8-cudnn9-devel@sha256:e567ea2642d4e25ef647e9872d09980613a7b1ecc8d2973e339c60a07343046f

WORKDIR /yambda

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    libsuitesparse-dev \
    build-essential \
    git


ENV CUDACXX=/usr/local/cuda/bin/nvcc \
    SUITESPARSE_INCLUDE_DIR=/usr/include/suitesparse \
    SUITESPARSE_LIBRARY_DIR=/usr/lib

RUN pip install implicit

RUN git clone https://github.com/glami/sansa.git && cd sansa && pip install . && cd ..

COPY . .
RUN pip install -e .
benchmarks/lint.sh
ADDED
@@ -0,0 +1,2 @@
ruff check --fix ./
ruff format ./
benchmarks/models/README.md
ADDED
@@ -0,0 +1,124 @@
# Reproducibility Guide

## Overview

This part of the repo contains the implementations and experiments. This guide will help you reproduce the results using Docker or a manual installation.

---

## Docker Setup (Recommended)

### 1. Build Docker Image

```bash
docker build -t yambda-image .
```

### 2. Run Container with GPU Support

```bash
docker run --gpus all \
    --runtime=nvidia \
    -it \
    -v </absolute/path/to/local/data>:/yambda/data \
    yambda-image
```

---

## Data Organization

Create the following structure in the mounted data directory:

```bash
data/
├── flat/
│   └── 50m/
│       ├── likes.parquet
│       ├── listens.parquet
│       └── ...
└── sequential/
    └── 50m/
        ├── likes.parquet
        ├── listens.parquet
        └── ...
```

Note:
Sequential data is only needed for SASRec. You can build it from the flat data using scripts/transform2sequential.py or download it.

---

## Running Experiments

### General Usage

```bash
# For example, random_rec

cd models/random_rec/

# Show help for main script
python main.py --help

# Basic execution
python main.py
```

### Specific Methods

#### BPR/ALS

```bash
cd models/bpr_als

python main.py --model bpr
python main.py --model als
```

#### SASRec

```bash
cd models/sasrec

# Training
python train.py --exp_name exp1

# Evaluation
python eval.py --exp_name exp1
```

---

## Manual Installation (Not Recommended)

### 1. Install Core Dependencies

```bash
pip install torch torchvision torchaudio
```

### 2. Install Implicit (CUDA 11.8 required)

Implicit works only with CUDA < 12. See the reasons [here](https://github.com/NVIDIA/nvidia-docker/issues/700#issuecomment-381073278).

```bash
CUDACXX=/usr/local/cuda-11.8/bin/nvcc \
pip install implicit
```

### 3. Install SANSA

```bash
sudo apt-get install libsuitesparse-dev
git clone https://github.com/glami/sansa.git
cd sansa && \
SUITESPARSE_INCLUDE_DIR=/usr/include/suitesparse \
SUITESPARSE_LIBRARY_DIR=/usr/lib \
pip install .
```

### 4. Install Project Package

```bash
pip install .  # in the root directory
```
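Before launching the experiments above, it can help to confirm that the mounted data directory matches the expected layout. This is a small optional sketch; the `data` root and the subset of files checked are illustrative choices based on the tree shown in the README, not part of the repo:

```python
from pathlib import Path

# Sanity-check the mounted data layout described in the README above.
# The root path and the files listed here are illustrative assumptions.
data_root = Path("data")
expected = [
    data_root / "flat" / "50m" / "likes.parquet",
    data_root / "flat" / "50m" / "listens.parquet",
    data_root / "sequential" / "50m" / "likes.parquet",  # only needed for SASRec
]

for path in expected:
    print(("ok     " if path.exists() else "MISSING"), path)
```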
benchmarks/models/bpr_als/main.py
ADDED
@@ -0,0 +1,301 @@
import json
from pathlib import Path
from typing import Any

import click
import numpy as np
import optuna
import polars as pl
import scipy.sparse as sp
import torch
from implicit.gpu.als import AlternatingLeastSquares
from implicit.gpu.bpr import BayesianPersonalizedRanking

from yambda.constants import Constants
from yambda.evaluation.metrics import calc_metrics
from yambda.evaluation.ranking import Embeddings, Targets, rank_items
from yambda.processing import timesplit
from yambda.utils import mean_dicts


@click.command()
@click.option(
    '--data_dir', required=True, type=str, default="../../data/flat", show_default=True, help="Expects flat data"
)
@click.option(
    '--size',
    required=True,
    type=click.Choice(['50m', '500m', "5b"]),
    default=["50m"],
    multiple=True,
    show_default=True,
)
@click.option(
    '--interaction',
    required=True,
    type=click.Choice(['likes', 'listens']),
    default=["likes"],
    multiple=True,
    show_default=True,
)
@click.option(
    "--model",
    required=True,
    type=click.Choice(['als', 'bpr']),
    multiple=False,
    show_default=True,
)
@click.option('--validation_metric', required=True, type=str, default="ndcg@100", show_default=True)
@click.option('--report_metrics', required=True, type=str, default=Constants.METRICS, multiple=True, show_default=True)
@click.option('--device', required=True, type=str, default="cuda:0", show_default=True)
def main(
    data_dir: str,
    size: list[str],
    interaction: list[str],
    model: str,
    validation_metric: str,
    report_metrics: list[str],
    device: str,
):
    print(f"REPORT METRICS: {report_metrics}")

    for s in size:
        for i in interaction:
            print(f"SIZE {s}, INTERACTION {i}")
            result = bpr_als(
                data_dir, s, i, device, model, validation_metric=validation_metric, report_metrics=report_metrics
            )
            print(json.dumps(result, indent=2))


def scan(path: str, dataset_size: str, dataset_name: str) -> pl.LazyFrame:
    path: Path = Path(path) / dataset_size / dataset_name
    df = pl.scan_parquet(path.with_suffix(".parquet"))
    return df


def preprocess(
    df: pl.LazyFrame, interaction: str, val_size: int
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    if interaction == "listens":
        df = df.filter(pl.col("played_ratio_pct") >= Constants.TRACK_LISTEN_THRESHOLD).select(
            "uid", "item_id", "timestamp"
        )

    train, val, test = timesplit.flat_split_train_val_test(
        df, val_size=val_size, test_timestamp=Constants.TEST_TIMESTAMP
    )

    return (
        train,
        val.collect(engine="streaming").lazy() if val is not None else None,
        test.collect(engine="streaming").lazy(),
    )


def process_train_data(df: pl.LazyFrame) -> tuple[pl.LazyFrame, list[int], list[int]]:
    unique_pairs = df.select("uid", "item_id").unique()

    unique_uids = df.select("uid").unique().sort("uid").collect(engine="streaming")["uid"].to_list()
    unique_item_ids = df.select("item_id").unique().sort("item_id").collect(engine="streaming")["item_id"].to_list()

    return unique_pairs, unique_uids, unique_item_ids


def build_csr_matrix(pairs: pl.LazyFrame, unique_uids: list[int], unique_item_ids: list[int]) -> sp.csr_matrix:
    uid_to_idx = {uid: i for i, uid in enumerate(unique_uids)}
    item_id_to_idx = {item_id: i for i, item_id in enumerate(unique_item_ids)}

    pairs = pairs.select(
        pl.col("uid").replace_strict(uid_to_idx, return_dtype=pl.UInt32),
        pl.col("item_id").replace_strict(item_id_to_idx, return_dtype=pl.UInt32),
    ).collect(engine="streaming")

    rows, cols = pairs["uid"].to_numpy(), pairs["item_id"].to_numpy()
    values = np.ones_like(rows, dtype=np.int32)

    return sp.coo_matrix(
        (values, (rows, cols)), dtype=np.float32, shape=(len(unique_uids), len(unique_item_ids))
    ).tocsr()


def train_embeddings_with_als(
    user_item_interactions: sp.csr_matrix,
    regularization: float = 0.01,
    iterations: int = 100,
    random_state: int = 42,
    factors: int = 64,
) -> tuple[np.ndarray, np.ndarray]:
    als = AlternatingLeastSquares(
        factors=factors,
        regularization=regularization,
        iterations=iterations,
        random_state=random_state,
        calculate_training_loss=False,
    )
    als.fit(user_item_interactions, show_progress=False)
    return als.user_factors.to_numpy(), als.item_factors.to_numpy()


def train_embeddings_with_bpr(
    user_item_interactions: sp.csr_matrix,
    learning_rate: float = 0.01,
    regularization: float = 0.01,
    iterations: int = 100,
    random_state: int = 42,
    factors: int = 64,
) -> tuple[np.ndarray, np.ndarray]:
    bpr = BayesianPersonalizedRanking(
        factors=factors,
        learning_rate=learning_rate,
        regularization=regularization,
        iterations=iterations,
        random_state=random_state,
        verify_negative_samples=True,
    )
    bpr.fit(user_item_interactions, show_progress=False)
    return bpr.user_factors.to_numpy(), bpr.item_factors.to_numpy()


def calc_embeddings_metrics(
    user_emb: np.ndarray,
    item_emb: np.ndarray,
    uid_tensor: torch.Tensor,
    item_id_tensor: torch.Tensor,
    targets: Targets,
    metrics: list[str],
    device: str,
) -> dict[str, dict[int, float]]:
    num_ranked_items = max([int(x.split("@")[1]) for x in metrics])
    user_emb = Embeddings(uid_tensor, torch.from_numpy(user_emb).to(device))
    item_emb = Embeddings(item_id_tensor, torch.from_numpy(item_emb).to(device))

    ranked_items = rank_items(user_emb, item_emb, num_ranked_items)
    return calc_metrics(ranked_items, targets, metrics)


def hyperopt(
    train: pl.LazyFrame,
    val: pl.LazyFrame,
    n_trials: int,
    model: str,
    validation_metric: str,
    device: str,
) -> dict[str, float | int]:
    pairs, unique_uids, unique_item_ids = process_train_data(train)

    user_item_interactions = build_csr_matrix(pairs, unique_uids, unique_item_ids)

    targets = Targets.from_sequential(val.group_by("uid").agg("item_id").collect(engine="streaming"), device)

    def objective(trial: optuna.Trial) -> float:
        iterations = trial.suggest_int(name="iterations", low=10, high=300, log=True)
        regularization = trial.suggest_float(name="regularization", low=1e-5, high=1, log=True)

        if model == "als":
            user_emb, item_emb = train_embeddings_with_als(user_item_interactions, regularization, iterations)
        else:
            learning_rate = trial.suggest_float(name="learning_rate", low=1e-5, high=0.1, log=True)
            user_emb, item_emb = train_embeddings_with_bpr(
                user_item_interactions, learning_rate, regularization, iterations
            )

        metrics = calc_embeddings_metrics(
            user_emb,
            item_emb,
            torch.tensor(unique_uids, device=device),
            torch.tensor(unique_item_ids, device=device),
            targets,
            [validation_metric],
            device,
        )

        t, k = validation_metric.split('@')
        return metrics[t][int(k)]

    optuna.logging.set_verbosity(optuna.logging.WARNING)
    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=n_trials)

    return study.best_params


def evaluation(
    train: pl.LazyFrame,
    test: pl.LazyFrame,
    model: str,
    hp: dict[str, Any],
    random_seeds: list[int],
    report_metrics: list[str],
    device: str,
) -> dict[str, dict[int, float]]:
    train, unique_uids, unique_item_ids = process_train_data(train)

    user_item_interactions = build_csr_matrix(train, unique_uids, unique_item_ids)

    targets = Targets.from_sequential(
        test.group_by("uid").agg("item_id").collect(engine="streaming"),
        device,
    )

    metrics_list = []

    for seed in random_seeds:
        if model == "als":
            user_emb, item_emb = train_embeddings_with_als(
                user_item_interactions, hp["regularization"], hp["iterations"], seed
            )
        else:
            user_emb, item_emb = train_embeddings_with_bpr(
                user_item_interactions, hp["learning_rate"], hp["regularization"], hp["iterations"], seed
            )

        metrics = calc_embeddings_metrics(
            user_emb,
            item_emb,
            torch.tensor(unique_uids, device=device),
            torch.tensor(unique_item_ids, device=device),
            targets,
            report_metrics,
            device,
        )
        metrics_list.append(metrics)

    return mean_dicts(metrics_list)


def bpr_als(
    data_dir: str,
    size: str,
    interaction: str,
    device: str,
    model: str,
    validation_metric: str,
    report_metrics: list[str],
    n_trials: dict[str, int] = {"50m": 10, "500m": 2, "5b": 2},
):
    df = scan(data_dir, size, interaction)

    # hyperopt by validation
    train, val, _ = preprocess(df, interaction, val_size=Constants.VAL_SIZE)
    hp = hyperopt(train, val, n_trials[size], model, validation_metric, device)

    print("Best HP:")
    print(json.dumps(hp, indent=2))

    # final model
    train, _, test = preprocess(df, interaction, val_size=0)

    return evaluation(
        train,
        test,
        model,
        hp,
        random_seeds=[41, 42, 43],
        report_metrics=report_metrics,
        device=device,
    )


if __name__ == "__main__":
    main()
benchmarks/models/itemknn/README.md
ADDED
@@ -0,0 +1,21 @@
## ItemKNN

We employ cosine similarity to measure the similarity between vectors. The item vectors are num_users-dimensional vectors derived from the user-item interaction matrix. In the simplest case, we generate top-k recommendations by retrieving the nearest vectors to the user's most recent item (which corresponds to a decay parameter $\tau \rightarrow 0$). The more general formulation is as follows:

$score(user_i, item_j) = \cos(V[i, :], U[:, j])$,

where $U$ is the num_users × num_items interaction matrix, and

$V$ is a num_users × num_users matrix with $V[i, :] = \sum_{(t, k) \in A_i} \tau^{\max_t(i) - t} \cdot U[:, k]^T$, where $A_i$ is the set of the $i$-th user's (interaction_timestamp, item_index) pairs, $\max_t(i)$ is the $i$-th user's last interaction timestamp, and $\tau$ is the decay coefficient (per second).

The hyperparameter "hour" defines the time period (in hours) after which an interaction's weight decays by a factor of 0.9.

For the 5b dataset, multiplying the [num_users × num_items] interaction matrix by its [num_items × num_users] transpose to obtain the [num_users × num_users] matrix of user embeddings exceeds memory constraints. We also use cosine similarity to the mean embedding instead of pairwise similarities, to avoid introducing an extra dimension (× basket_size).
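To make the decay parameterization above concrete, here is a minimal sketch assuming per-second integer timestamps; only the 0.9 base and the "hour" convention come from the text, the helper names and example values are illustrative:

```python
import numpy as np


def decay_per_second(hour: float, base: float = 0.9) -> float:
    """Per-second decay tau chosen so an interaction `hour` hours old is down-weighted by `base`."""
    return base ** (1.0 / (hour * 3600.0))


def interaction_weights(timestamps: np.ndarray, hour: float) -> np.ndarray:
    """Weight each of one user's interactions by tau ** (last_timestamp - timestamp)."""
    tau = decay_per_second(hour)
    return tau ** (timestamps.max() - timestamps)


# With hour=1: the most recent interaction gets weight 1.0, one from an hour ago ~0.9,
# one from two hours ago ~0.81.
print(interaction_weights(np.array([0, 3600, 7200]), hour=1.0))
```

The weighted user vector $V[i, :]$ is then the sum of these weights times the corresponding item columns of $U$, exactly as in the formula above.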
benchmarks/models/itemknn/main.py
ADDED
@@ -0,0 +1,253 @@
import gc
import json
from pathlib import Path
from typing import Any

import click
import polars as pl
import torch

from yambda.constants import Constants
from yambda.evaluation.metrics import calc_metrics
from yambda.evaluation.ranking import Embeddings, Targets, rank_items
from yambda.processing import timesplit
from yambda.utils import argmax


DEFAULT_GRIDS = {
    "50m": {
        "likes": [0, 0.001, 0.25, 0.5, 1, 2, 3, 4, 6],
        "listens": [0, 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.5, 1.0, 2],
    },
    "500m": {
        "likes": [0, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.5, 1.0],
        "listens": [0, 0.001, 0.002, 0.004, 0.008],
    },
}


@click.command()
@click.option(
    '--data_dir', required=True, type=str, default="../../data/flat", show_default=True, help="Expects flat data"
)
@click.option(
    '--size',
    required=True,
    type=click.Choice(['50m', '500m']),  # 5b is not supported
    default="50m",
    multiple=False,
    show_default=True,
    help="5b is not supported due to (num_user, num_user) matrix",
)
@click.option(
    '--interaction',
    required=True,
    type=click.Choice(['likes', 'listens']),
    default="likes",
    multiple=False,
    show_default=True,
)
@click.option(
    '--hours',
    required=True,
    type=float,
    default=[-1],
    multiple=True,
    show_default=True,
    help="Hyperparameter. If -1 default grid will be used",
)
@click.option('--validation_metric', required=True, type=str, default="ndcg@100", show_default=True)
@click.option('--report_metrics', required=True, type=str, default=Constants.METRICS, multiple=True, show_default=True)
@click.option('--device', required=True, type=str, default="cuda:0", show_default=True)
def main(
    data_dir: str,
    size: str,
    interaction: str,
    hours: list[float],
    validation_metric: str,
    report_metrics: list[str],
    device: str,
):
    print(f"REPORT METRICS: {report_metrics}")
    print(f"SIZE {size}, INTERACTION {interaction}")
    result = item_knn(
        data_dir,
        size,
        interaction,
        device,
        hours=hours if hours[0] != -1 else DEFAULT_GRIDS[size][interaction],
        validation_metric=validation_metric,
        report_metrics=report_metrics,
    )
    print(json.dumps(result, indent=2))


def scan(path: str, dataset_size: str, dataset_name: str) -> pl.LazyFrame:
    path = Path(path) / dataset_size / dataset_name
    df = pl.scan_parquet(path.with_suffix(".parquet"))
    return df


def preprocess(
    df: pl.LazyFrame, interaction: str, val_size: int
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    if interaction == "listens":
        df = df.filter(pl.col("played_ratio_pct") >= Constants.TRACK_LISTEN_THRESHOLD)

    train, val, test = timesplit.flat_split_train_val_test(
        df, val_size=val_size, test_timestamp=Constants.TEST_TIMESTAMP
    )

    return (
        train,
        val.collect(engine="streaming").lazy() if val is not None else None,
        test.collect(engine="streaming").lazy(),
    )


def eliminate_zeros(x: torch.Tensor, threshold: float = 1e-9) -> torch.Tensor:
    mask = (x._values() > threshold).nonzero()
    nv = x._values().index_select(0, mask.view(-1))
    ni = x._indices().index_select(1, mask.view(-1))
    return torch.sparse_coo_tensor(ni, nv, x.shape)


def create_weighted_sparse_tensor(train: pl.LazyFrame, tau: float) -> torch.Tensor:
    uid_mapping = (
        train.select("uid").unique().with_columns(pl.col("uid").rank(method="dense").alias("uid_idx") - 1).collect()
    )

    item_mapping = (
        train.select("item_id")
        .unique()
        .with_columns(pl.col("item_id").rank(method="dense").alias("item_idx") - 1)
        .collect()
    )

    processed = (
        train.with_columns(pl.max("timestamp").over("uid").alias("max_timestamp"))
        .with_columns((pl.col("max_timestamp") - pl.col("timestamp")).alias("delta"))
        .with_columns((tau ** pl.col("delta")).alias("weight"))
        .join(uid_mapping.lazy(), on="uid", how="inner")
        .join(item_mapping.lazy(), on="item_id", how="inner")
    )

    coo_data = processed.group_by(["uid_idx", "item_idx"]).agg(pl.sum("weight").alias("total_weight")).collect()

    indices = torch.concat([coo_data["uid_idx"].to_torch()[None, :], coo_data["item_idx"].to_torch()[None, :]], dim=0)
    values = torch.tensor(coo_data["total_weight"].to_numpy(), dtype=torch.float)

    return eliminate_zeros(
        torch.sparse_coo_tensor(
            indices=indices, values=values, size=(uid_mapping["uid_idx"].max() + 1, item_mapping["item_idx"].max() + 1)
        )
    )


def sparse_normalize(sparse_tensor: torch.Tensor, dim=0, eps=1e-12):
    indices = sparse_tensor.coalesce().indices()
    values = sparse_tensor.coalesce().values()

    unique_dim_indices, inverse = torch.unique(indices[dim], return_inverse=True)
    squared_values = values**2
    sum_squared = torch.zeros_like(unique_dim_indices, dtype=torch.float32)
    sum_squared.scatter_add_(0, inverse, squared_values)

    norms = torch.sqrt(sum_squared + eps)
    normalized_values = values / norms[inverse]

    return torch.sparse_coo_tensor(indices, normalized_values, sparse_tensor.size())


def training(
    train: pl.LazyFrame, hour: float, user_item: torch.Tensor, user_ids: torch.Tensor, device: str, decay: float = 0.9
) -> Embeddings:
    tau = 0.0 if hour == 0 else decay ** (1 / 24 / 60 / 60 / (hour / 24))

    user_item_with_tau = create_weighted_sparse_tensor(train, tau)
    user_embeddings = (user_item_with_tau @ user_item.T).to_dense()
    user_embeddings = torch.nn.functional.normalize(user_embeddings, dim=-1)

    return Embeddings(user_ids, user_embeddings.to(device))


def evaluation(
    train: pl.LazyFrame, val: pl.LazyFrame, device: str, hours: list[float], metrics: list[str]
) -> list[dict[str, Any]]:
    num_ranked_items = max([int(x.split("@")[1]) for x in metrics])

    unique_user_ids = train.select("uid").unique().sort("uid").collect(engine="streaming")["uid"].to_torch().to(device)
    unique_item_ids = (
        train.select("item_id").unique().sort("item_id").collect(engine="streaming")["item_id"].to_torch().to(device)
    )

    user_item = create_weighted_sparse_tensor(train, 1.0)
    item_embeddings = sparse_normalize(user_item.T.to(device), dim=-1)
    item_embeddings = Embeddings(unique_item_ids, item_embeddings)

    targets = Targets.from_sequential(
        val.group_by('uid', maintain_order=True).agg(pl.all().exclude('uid')).select(['uid', 'item_id']),
        device,
    )

    hour2metrics = []
    for hour in hours:
        user_embeddings = training(
            train=train,
            hour=hour,
            user_item=user_item,
            user_ids=unique_user_ids,
            device=device,
        )

        ranked = rank_items(
            users=user_embeddings,
            items=item_embeddings,
            num_items=num_ranked_items,
            batch_size=128,
        )

        del user_embeddings
        gc.collect()

        hour2metrics.append(calc_metrics(ranked, targets, metrics))

    del unique_user_ids
    del unique_item_ids
    del item_embeddings
    del targets
    gc.collect()

    return hour2metrics


def item_knn(
    data_dir: str,
    size: str,
    interaction: str,
    device: str,
    hours: list[float],
    validation_metric: str,
    report_metrics: list[str],
) -> dict[str, Any]:
    df = scan(data_dir, size, interaction)

    # hyperopt by validation
    train, val, _ = preprocess(df, interaction, val_size=Constants.VAL_SIZE)

    results = evaluation(train, val, device, hours, [validation_metric])

    metric_name, k = validation_metric.split('@')

    best_hour = hours[argmax(results, lambda x: x[metric_name][int(k)])]

    print(f"FINAL HYPERPARAMS {best_hour=}")

    # train final model
    train, _, test = preprocess(df, interaction, val_size=0)

    return evaluation(train, test, device, [best_hour], report_metrics)[0]


if __name__ == "__main__":
    main()
benchmarks/models/popularity/main.py
ADDED
@@ -0,0 +1,183 @@
import json
from pathlib import Path
from typing import Any

import click
import polars as pl
import torch

from yambda.constants import Constants
from yambda.evaluation.metrics import calc_metrics
from yambda.evaluation.ranking import Embeddings, Ranked, Targets
from yambda.processing import timesplit
from yambda.utils import argmax


@click.command()
@click.option(
    '--data_dir', required=True, type=str, default="../../data/flat", show_default=True, help="Expects flat data"
)
@click.option(
    '--size',
    required=True,
    type=click.Choice(['50m', '500m', "5b"]),
    default=["50m"],
    multiple=True,
    show_default=True,
)
@click.option(
    '--interaction',
    required=True,
    type=click.Choice(['likes', 'listens']),
    default=["likes"],
    multiple=True,
    show_default=True,
)
@click.option(
    '--hours',
    required=True,
    type=float,
    default=[0.5, 1, 2, 3, 6, 12, 24],
    multiple=True,
    show_default=True,
    help="Hyperparameter",
)
@click.option('--validation_metric', required=True, type=str, default="ndcg@100", show_default=True)
@click.option('--report_metrics', required=True, type=str, default=Constants.METRICS, multiple=True, show_default=True)
@click.option('--device', required=True, type=str, default="cuda:0", show_default=True)
def main(
    data_dir: str,
    size: list[str],
    interaction: list[str],
    hours: list[float],
    validation_metric: str,
    report_metrics: list[str],
    device: str,
):
    print(f"REPORT METRICS: {report_metrics}")
    for s in size:
        for i in interaction:
            print(f"SIZE {s}, INTERACTION {i}")
            result = popularity(
                data_dir,
                s,
                i,
                device,
                hours=hours,
                validation_metric=validation_metric,
                report_metrics=report_metrics,
            )
            print(json.dumps(result, indent=2))


def scan(path: str, dataset_size: str, dataset_name: str) -> pl.LazyFrame:
    path: Path = Path(path) / dataset_size / dataset_name
    df = pl.scan_parquet(path.with_suffix(".parquet"))
    return df


def preprocess(
    df: pl.LazyFrame, interaction: str, val_size: int
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    if interaction == "listens":
        df = df.filter(pl.col("played_ratio_pct") >= Constants.TRACK_LISTEN_THRESHOLD)

    train, val, test = timesplit.flat_split_train_val_test(
        df, val_size=val_size, test_timestamp=Constants.TEST_TIMESTAMP
    )

    return (
        train,
        val.collect(engine="streaming").lazy() if val is not None else None,
        test.collect(engine="streaming").lazy(),
    )


def training(hour: float, train: pl.LazyFrame, max_timestamp: float, device: str, decay: float = 0.9) -> Embeddings:
    if hour == 0:
        embeddings = train.group_by("item_id").agg(pl.count().alias("item_embedding")).collect(engine="streaming")
    else:
        tau = decay ** (1 / Constants.DAY_SECONDS / (hour / 24))

        embeddings = (
            train.select(
                "item_id",
                (tau ** (max_timestamp - pl.col("timestamp"))).alias("value"),
            )
            .group_by("item_id")
            .agg(pl.col("value").sum().alias("item_embedding"))
            .collect(engine="streaming")
        )

    item_ids = embeddings["item_id"].to_torch().to(device)

    item_embeddings = embeddings["item_embedding"].to_torch().to(device)[:, None]

    return Embeddings(item_ids, item_embeddings)


def evaluation(
    train: pl.LazyFrame, val: pl.LazyFrame, device: str, hours: list[float], metrics: list[str]
) -> list[dict[str, Any]]:
    num_ranked_items = max([int(x.split("@")[1]) for x in metrics])

    max_timestamp = train.select(pl.col("timestamp").max()).collect(engine="streaming").item()
    user_ids = train.select("uid").unique().collect(engine="streaming")["uid"].to_torch().to(device)

    targets = Targets.from_sequential(
        val.group_by('uid', maintain_order=True).agg("item_id"),
        device,
    )

    hour2metrics = []
    for hour in hours:
        item_embeddings = training(
            hour=hour,
            train=train,
            max_timestamp=max_timestamp,
            device=device,
        )

        ranked = Ranked(
            user_ids=user_ids,
            item_ids=item_embeddings.ids[torch.topk(item_embeddings.embeddings, num_ranked_items, dim=0).indices]
            .ravel()
            .expand((user_ids.shape[0], num_ranked_items)),
            num_item_ids=item_embeddings.ids.shape[0],
        )

        hour2metrics.append(calc_metrics(ranked, targets, metrics))

    return hour2metrics


def popularity(
    data_dir: str,
    size: str,
    interaction: str,
    device: str,
    hours: list[float],
    validation_metric: str,
    report_metrics: list[str],
) -> dict[str, Any]:
    df = scan(data_dir, size, interaction)

    # hyperopt by validation
    train, val, _ = preprocess(df, interaction, val_size=Constants.VAL_SIZE)

    results = evaluation(train, val, device, hours, [validation_metric])

    metric_name, k = validation_metric.split('@')

    best_hour = hours[argmax(results, lambda x: x[metric_name][int(k)])]

    print(f"FINAL HYPERPARAMS {best_hour=}")

    # train final model
    train, _, test = preprocess(df, interaction, val_size=0)

    return evaluation(train, test, device, [best_hour], report_metrics)[0]


if __name__ == "__main__":
    main()
benchmarks/models/random_rec/main.py
ADDED
@@ -0,0 +1,128 @@
import json
from pathlib import Path

import click
import polars as pl
import torch

from yambda.constants import Constants
from yambda.evaluation.metrics import calc_metrics
from yambda.evaluation.ranking import Ranked, Targets
from yambda.processing import timesplit
from yambda.utils import mean_dicts


@click.command()
@click.option(
    '--data_dir',
    required=True,
    type=str,
    default="../../data/flat",
    show_default=True,
    help="Expects flat data",
)
@click.option(
    '--size',
    required=True,
    type=click.Choice(['50m', '500m', "5b"]),
    default=["50m"],
    multiple=True,
    show_default=True,
)
@click.option(
    '--interaction',
    required=True,
    type=click.Choice(['likes', 'listens']),
    default=["likes"],
    multiple=True,
    show_default=True,
)
@click.option('--device', required=True, type=str, default="cuda:0", show_default=True)
@click.option('--num_repeats', required=True, type=int, default=2, show_default=True)
def main(
    data_dir: str,
    size: list[str],
    interaction: list[str],
    device: str,
    num_repeats: int,
):
    print(f"calc metrics: {Constants.METRICS}")
    for s in size:
        for i in interaction:
            print(f"SIZE {s}, INTERACTION {i}")
            result = random_rec(data_dir, s, i, num_repeats, device)
            print(json.dumps(result, indent=2))


def scan(path: str, dataset_size: str, dataset_name: str) -> pl.LazyFrame:
    path: Path = Path(path) / dataset_size / dataset_name
    return pl.scan_parquet(path.with_suffix(".parquet"))


def preprocess(
    df: pl.LazyFrame, interaction: str, val_size: int
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    if interaction == "listens":
        df = df.filter(pl.col("played_ratio_pct") >= Constants.TRACK_LISTEN_THRESHOLD)

    train, val, test = timesplit.flat_split_train_val_test(
        df, val_size=val_size, test_timestamp=Constants.TEST_TIMESTAMP
    )

    return (
        train,
        val.collect(engine="streaming").lazy() if val is not None else None,
        test.collect(engine="streaming").lazy(),
    )


def random_rec(
    data_dir: str,
    size: str,
    interaction: str,
    num_repeats: int,
    device: str,
) -> dict[str, dict[int, float]]:
    df = scan(data_dir, size, interaction)

    train, _, test = preprocess(df, interaction, val_size=0)

    unique_user_ids = train.select("uid").unique().sort("uid").collect(engine="streaming")["uid"].to_torch().to(device)

    unique_item_ids = (
        train.select("item_id").unique().sort("item_id").collect(engine="streaming")["item_id"].to_torch().to(device)
    )

    print(f"NUM_USERS {unique_user_ids.shape[0]}, NUM_ITEMS {unique_item_ids.shape[0]}")

    targets = Targets.from_sequential(
        test.group_by('uid', maintain_order=True).agg("item_id"),
        device,
    )

    metrics_list = []

    for _ in range(num_repeats):
        ranked = Ranked(
            user_ids=unique_user_ids,
            item_ids=unique_item_ids[
                torch.randint(
                    0, unique_item_ids.shape[0] - 1, size=(unique_user_ids.shape[0], Constants.NUM_RANKED_ITEMS)
                )
            ],
            num_item_ids=unique_item_ids.shape[0],
        )

        metrics_list.append(
            calc_metrics(
                ranked,
                targets,
                metrics=Constants.METRICS,
            )
        )

    return mean_dicts(metrics_list)


if __name__ == "__main__":
    main()
benchmarks/models/sansa/README.md
ADDED
@@ -0,0 +1,9 @@
## SANSA

Training SANSA on Yambda-500M with Listens, on Yambda-5B with Likes, and on Yambda-5B with Listens is infeasible due to the memory explosion during sparse matrix multiplication.

For example, given the base weight-matrix density of $5 \cdot 10^{-5}$, the sparse matrix multiplication for Yambda-500M with Listens requires roughly $466512103 \cdot (3004578^2 \cdot 5 \cdot 10^{-5}) \approx 2.1 \cdot 10^{17}$ operations.

One might hope to solve this by reducing the weight density further. However, given the results on Yambda-50M with Listens, reducing the density further would collapse the model's capacity, so in practice this is futile.

Unfortunately, the main SANSA repository is not optimized for large datasets, so even Yambda-50M with Listens requires at least 100 GB of RAM.
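As a sanity check, the operation count quoted above can be reproduced directly; the interaction and item counts are taken from the paragraph, the density from the SANSA config used in main.py, and the cost formula simply restates the one in the text:

```python
# Back-of-the-envelope estimate for Yambda-500M Listens, reproducing the figure above.
nnz_interactions = 466_512_103  # non-zero user-item interactions (from the text)
num_items = 3_004_578           # number of items (from the text)
density = 5e-5                  # weight_matrix_density from the SANSA config

ops = nnz_interactions * (num_items**2 * density)
print(f"{ops:.1e}")  # ~2.1e+17
```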
benchmarks/models/sansa/main.py
ADDED
@@ -0,0 +1,284 @@
import heapq
import json
import os
import time
from typing import Any

import click
import numpy as np
import polars as pl
import scipy.sparse as sp
import torch
from sansa import SANSA, ICFGramianFactorizerConfig, SANSAConfig, UMRUnitLowerTriangleInverterConfig
from tqdm import tqdm

from yambda.constants import Constants
from yambda.evaluation import metrics, ranking
from yambda.processing import timesplit


RANDOM_SEED = 42


@click.command()
@click.option(
    '--data_dir', required=True, type=str, default="../../data/flat", show_default=True, help="Expects flat data"
)
@click.option(
    '--size',
    required=True,
    type=click.Choice(['50m', '500m']),
    default="50m",
    multiple=False,
    show_default=True,
)
@click.option(
    '--interaction',
    required=True,
    type=click.Choice(['likes', 'listens']),
    default="likes",
    multiple=False,
    show_default=True,
)
@click.option('--report_metrics', required=True, type=str, default=Constants.METRICS, multiple=True, show_default=True)
@click.option('--device', required=True, type=str, default="cuda:0", show_default=True)
def main(
    data_dir: str,
    size: str,
    interaction: str,
    report_metrics: list[str],
    device: str,
):
    print(f"REPORT METRICS: {report_metrics}")
    print(f"SIZE {size}, INTERACTION {interaction}")
    result = train_sansa_model(
        data_dir,
        size=size,
        dataset_type=interaction,
        device=device,
        report_metrics=report_metrics,
    )
    print(json.dumps(result, indent=2))


def train_sansa_model(
    data_path: str,
    size: str,
    dataset_type: str,
    device: str,
    report_metrics: list[str],
) -> dict[str, Any]:
    np.random.seed(RANDOM_SEED)

    curr_time = time.time()
    print()
    print(curr_time)
    print(f"Size: {size}, Dataset: {dataset_type}")
    df, grouped_test, train, test = get_train_val_test_matrices(
        data_path=data_path,
        size=size,
        dataset_type=dataset_type,
    )
    data_finished = time.time()
    print(f"Data is loaded in {data_finished - curr_time} seconds")

    model = get_sansa_model()
    model.fit(train)
    train_finished = time.time()

    print(f"Model is trained in {train_finished - data_finished}")
    print(model)

    if report_metrics:
        calculated_metrics = evaluate_sansa(
            df=df,
            model=model,
            device=device,
            report_metrics=report_metrics,
            grouped_test=grouped_test,
            sparse_train=train,
            sparse_test=test,
        )

        print(f"Model is evaluated in {time.time() - train_finished}")

        return calculated_metrics

    return {}


def get_train_val_test_matrices(
    data_path: str,
    size: str = "50m",
    dataset_type: str = "likes",
) -> tuple[pl.LazyFrame, pl.LazyFrame, sp.csr_matrix, sp.csr_matrix]:
    df = pl.scan_parquet(os.path.join(data_path, size, f"{dataset_type}.parquet"))

    if dataset_type == "listens":
        df = df.filter(pl.col("played_ratio_pct") >= Constants.TRACK_LISTEN_THRESHOLD)

    flat_train, _, flat_test = timesplit.flat_split_train_val_test(
        df, val_size=0, test_timestamp=Constants.TEST_TIMESTAMP
    )

    all_uids = set(flat_train.collect().get_column("uid").to_list())
    all_items = set(flat_train.collect().get_column("item_id").to_list())

    print(f"Dataset, users_num: {len(all_uids)}, items_num: {len(all_items)}")

    # Create mapping to create sparse matrix
    uid_to_idx = {uid: i for i, uid in enumerate(all_uids)}
    item_id_to_idx = {item_id: i for i, item_id in enumerate(all_items)}

    sparse_train, _ = get_sparse_data(flat_train, uid_to_idx, item_id_to_idx)
    sparse_test, grouped_test = get_sparse_data(flat_test, uid_to_idx, item_id_to_idx)

    print(f"Sparse train shape: {sparse_train.shape}, test shape: {sparse_test.shape}")

    return df, grouped_test, sparse_train, sparse_test


def get_sparse_data(
    df: pl.LazyFrame, uid_to_idx: dict[int, int], item_id_to_idx: dict[int, int]
) -> tuple[sp.csr_matrix, pl.LazyFrame]:
    df = df.with_columns(
        pl.col("uid").replace_strict(uid_to_idx).alias("uid"),
        pl.col("item_id").replace_strict(item_id_to_idx, default=len(item_id_to_idx)).alias("item_id"),
        pl.lit(1).alias("action"),
    )

    grouped_df = df.group_by('uid', maintain_order=True).agg(
        [pl.col('item_id').alias('item_id'), pl.col('action').alias('actions')]
    )

    rows = []
    cols = []
    values = []

    for user_id, item_ids, actions in tqdm(grouped_df.select('uid', 'item_id', 'actions').collect().rows()):
        rows.extend([user_id] * len(item_ids))
        cols.extend(item_ids)
        values.extend(actions)

    user_item_data = sp.csr_matrix(
        (values, (rows, cols)),
        dtype=np.float32,
        shape=(len(uid_to_idx), len(item_id_to_idx) + 1),  # +1 for default unknown test items
    )

    return user_item_data, grouped_df


def get_sansa_model() -> SANSA:
    factorizer_config = ICFGramianFactorizerConfig(
        # reordering_use_long=True,
        factorization_shift_step=1e-3,  # initial diagonal shift if incomplete factorization fails
        factorization_shift_multiplier=2.0,  # multiplier for the shift for subsequent attempts
    )

    inverter_config = UMRUnitLowerTriangleInverterConfig(
        scans=1,  # number of scans through all columns of the matrix
        finetune_steps=15,  # number of finetuning steps, targeting worst columns
    )

    config = SANSAConfig(
        l2=10.0,  # regularization strength
        weight_matrix_density=5e-5,  # desired density of weights
        gramian_factorizer_config=factorizer_config,  # factorizer configuration
        lower_triangle_inverter_config=inverter_config,  # inverter configuration
    )

    print(config)

    model = SANSA(config)

    return model


def evaluate_sansa(
    df: pl.LazyFrame,
    model: SANSA,
    device: str,
    report_metrics: list[str],
    grouped_test: pl.LazyFrame,
    sparse_train: sp.csr_matrix,
    sparse_test: sp.csr_matrix,
) -> dict[str, Any]:
    num_items_for_metrics = len(set(df.collect().get_column("item_id").to_list()))
    print(num_items_for_metrics)

    test_targets = ranking.Targets.from_sequential(grouped_test, device=device)
    print(len(test_targets.user_ids))

    # to free some RAM
    del df, grouped_test

    train_pred_sparse = model.forward(sparse_train)
    print(f"Train prediction shape: {train_pred_sparse.shape}")

    A = train_pred_sparse
    num_users = A.shape[0]
    num_items_k = 150

    # 0 if there is no such item
    top_items_idx = np.full((num_users, num_items_k), 0, dtype=int)

    # -1 score if there is no such item
    top_items_score = np.full((num_users, num_items_k), -1, dtype=A.data.dtype)

    for row in tqdm(range(num_users)):
        start, end = A.indptr[row], A.indptr[row + 1]
        row_scores = A.data[start:end]
        row_cols = A.indices[start:end]

        if len(row_scores) == 0:
            continue

        k_here = min(num_items_k, len(row_scores))
        top_k = heapq.nlargest(k_here, zip(row_scores, row_cols), key=lambda x: x[0])

        # Fill in
        for i, (score, idx) in enumerate(top_k):
            top_items_idx[row, i] = idx
            top_items_score[row, i] = score

    user_ids = torch.arange(top_items_idx.shape[0], dtype=torch.int32, device="cpu")
    print(user_ids.shape)

    scores = torch.as_tensor(top_items_score, dtype=torch.float32, device="cpu")
    print(scores.shape)

    scores_indices = torch.as_tensor(top_items_idx, dtype=torch.long, device="cpu")
    print(scores_indices.shape)

    targets = torch.as_tensor(sparse_test.toarray(), dtype=torch.bool, device="cpu")
    print(targets.shape)

    targets = targets.to(dtype=torch.bool, device=device)
    not_zero_user_indices = targets.any(dim=1)
    print(torch.sum(not_zero_user_indices))

    not_zero_user_indices = not_zero_user_indices.to(dtype=torch.bool, device="cpu")

    user_ids = user_ids[not_zero_user_indices]
    scores = scores[not_zero_user_indices]
    print(f"After removing zero users scores shape: {scores.shape}, targets shape: {targets.shape}")

    scores_indices = scores_indices[not_zero_user_indices]
    print(scores_indices.shape)

    test_ranked = ranking.Ranked(
        user_ids=user_ids.to(device),
        scores=scores.to(device),
        item_ids=scores_indices.to(device),
        num_item_ids=num_items_for_metrics,
    )

    calculated_metrics = metrics.calc_metrics(test_ranked, test_targets, report_metrics)
    print(calculated_metrics)

    return calculated_metrics


if __name__ == "__main__":
    main()
benchmarks/models/sasrec/data.py
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
from dataclasses import dataclass, field
|
3 |
+
from functools import cached_property
|
4 |
+
from typing import Dict, List
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import polars as pl
|
8 |
+
import torch
|
9 |
+
|
10 |
+
from yambda.constants import Constants
|
11 |
+
from yambda.processing import timesplit
|
12 |
+
|
13 |
+
|
14 |
+
logger = logging.getLogger(__name__)
|
15 |
+
|
16 |
+
|
17 |
+
@dataclass
|
18 |
+
class Data:
|
19 |
+
train: pl.LazyFrame
|
20 |
+
validation: pl.LazyFrame | None
|
21 |
+
test: pl.LazyFrame
|
22 |
+
item_id_to_idx: dict[int, int]
|
23 |
+
|
24 |
+
_train_user_ids: torch.Tensor | None = field(init=False, default=None)
|
25 |
+
|
26 |
+
@property
|
27 |
+
def num_items(self):
|
28 |
+
return len(self.item_id_to_idx)
|
29 |
+
|
30 |
+
@cached_property
|
31 |
+
def num_train_users(self):
|
32 |
+
return self.train.select(pl.len()).collect(engine="streaming").item()
|
33 |
+
|
34 |
+
def train_user_ids(self, device):
|
35 |
+
if self._train_user_ids is None or self._train_user_ids.device != device:
|
36 |
+
self._train_user_ids = self.train.select('uid').collect(engine="streaming")['uid'].to_torch().to(device)
|
37 |
+
return self._train_user_ids
|
38 |
+
|
39 |
+
|
40 |
+
def preprocess(df: pl.LazyFrame, interaction: str, val_size=Constants.VAL_SIZE, max_seq_len: int = 200) -> Data:
|
41 |
+
"""
|
42 |
+
Preprocesses raw interaction data for recommendation system modeling.
|
43 |
+
|
44 |
+
Args:
|
45 |
+
df (pl.LazyFrame): Raw input data containing user interaction sequences
|
46 |
+
interaction (str): Type of interaction to process. Must be either 'likes' or 'listens'.
|
47 |
+
        val_size (int): Size of the validation time window in seconds (default: Constants.VAL_SIZE)
        max_seq_len (int): Maximum number of most recent events kept per user in the training split
|
48 |
+
|
49 |
+
Returns:
|
50 |
+
        Data: Dataclass containing:
|
51 |
+
- train (pl.LazyFrame): Training data
|
52 |
+
            - validation (pl.LazyFrame | None): Validation data (None when no validation split is made)
|
53 |
+
- test (pl.LazyFrame): Test data
|
54 |
+
- item_id_to_idx (dict): Mapping from original item IDs to model indices
|
55 |
+
|
56 |
+
Note:
|
57 |
+
        - For 'listens' interactions, keeps only events with played_ratio_pct >= Constants.TRACK_LISTEN_THRESHOLD
|
58 |
+
- Item indices start at 1 to reserve 0 for padding/masking
|
59 |
+
"""
|
60 |
+
if interaction == 'listens':
|
61 |
+
df = df.select(
|
62 |
+
'uid',
|
63 |
+
pl.col('item_id', 'timestamp').list.gather(
|
64 |
+
pl.col('played_ratio_pct').list.eval(pl.arg_where(pl.element() >= Constants.TRACK_LISTEN_THRESHOLD))
|
65 |
+
),
|
66 |
+
).filter(pl.col('item_id').list.len() > 0)
|
67 |
+
|
68 |
+
unique_item_ids = (
|
69 |
+
df.select(pl.col("item_id").explode().unique().sort()).collect(engine="streaming")["item_id"].to_list()
|
70 |
+
)
|
71 |
+
|
72 |
+
item_id_to_idx = {int(item_id): i + 1 for i, item_id in enumerate(unique_item_ids)}
|
73 |
+
|
74 |
+
train, val, test = timesplit.sequential_split_train_val_test(
|
75 |
+
df, val_size=val_size, test_timestamp=Constants.TEST_TIMESTAMP, drop_non_train_items=False
|
76 |
+
)
|
77 |
+
|
78 |
+
def replace_strict(df):
|
79 |
+
return (
|
80 |
+
df.select(
|
81 |
+
pl.col("item_id").list.eval(pl.element().replace_strict(item_id_to_idx)),
|
82 |
+
pl.all().exclude("item_id"),
|
83 |
+
)
|
84 |
+
.collect(engine="streaming")
|
85 |
+
.lazy()
|
86 |
+
)
|
87 |
+
|
88 |
+
    # polars needs too much memory for replace_strict when the lists are too long, so truncate to max_seq_len first
|
89 |
+
train = train.select('uid', pl.all().exclude('uid').list.slice(-max_seq_len, max_seq_len))
|
90 |
+
train = replace_strict(train)
|
91 |
+
|
92 |
+
if val is not None:
|
93 |
+
val = replace_strict(val)
|
94 |
+
|
95 |
+
test = replace_strict(test)
|
96 |
+
|
97 |
+
return Data(train, val, test, item_id_to_idx)
|
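A quick usage sketch for `preprocess` (illustrative only; the parquet path follows the `data/sequential/<size>/<interaction>.parquet` layout that `train.py` and `eval.py` assume):

```python
import polars as pl

from data import preprocess

df = pl.scan_parquet("../../data/sequential/50m/likes.parquet")  # example path
data = preprocess(df, interaction="likes", val_size=0, max_seq_len=200)

print(data.num_items)  # number of distinct items; index 0 is reserved for padding
train_df = data.train.collect(engine="streaming")
print(train_df.select("uid", "item_id").head())
```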
98 |
+
|
99 |
+
|
100 |
+
class TrainDataset:
|
101 |
+
def __init__(self, dataset: pl.DataFrame, num_items: int, max_seq_len: int):
|
102 |
+
self._dataset = dataset
|
103 |
+
self._num_items = num_items
|
104 |
+
self._max_seq_len = max_seq_len
|
105 |
+
|
106 |
+
@property
|
107 |
+
def dataset(self) -> pl.DataFrame:
|
108 |
+
return self._dataset
|
109 |
+
|
110 |
+
def __len__(self) -> int:
|
111 |
+
return len(self._dataset)
|
112 |
+
|
113 |
+
def __getitem__(self, index: int) -> Dict[str, List[int] | int]:
|
114 |
+
sample = self._dataset.row(index, named=True)
|
115 |
+
|
116 |
+
item_sequence = sample['item_id'][:-1][-self._max_seq_len :]
|
117 |
+
positive_sequence = sample['item_id'][1:][-self._max_seq_len :]
|
118 |
+
negative_sequence = np.random.randint(1, self._num_items + 1, size=(len(item_sequence),)).tolist()
|
119 |
+
|
120 |
+
return {
|
121 |
+
'user.ids': [sample['uid']],
|
122 |
+
'user.length': 1,
|
123 |
+
'item.ids': item_sequence,
|
124 |
+
'item.length': len(item_sequence),
|
125 |
+
'positive.ids': positive_sequence,
|
126 |
+
'positive.length': len(positive_sequence),
|
127 |
+
'negative.ids': negative_sequence,
|
128 |
+
'negative.length': len(negative_sequence),
|
129 |
+
}
|
130 |
+
|
131 |
+
|
132 |
+
class EvalDataset:
|
133 |
+
def __init__(self, dataset: pl.DataFrame, max_seq_len: int):
|
134 |
+
self._dataset = dataset
|
135 |
+
self._max_seq_len = max_seq_len
|
136 |
+
|
137 |
+
@property
|
138 |
+
def dataset(self) -> pl.DataFrame:
|
139 |
+
return self._dataset
|
140 |
+
|
141 |
+
def __len__(self) -> int:
|
142 |
+
return len(self._dataset)
|
143 |
+
|
144 |
+
def __getitem__(self, index: int) -> Dict[str, List[int] | int]:
|
145 |
+
sample = self._dataset.row(index, named=True)
|
146 |
+
|
147 |
+
item_sequence = sample['item_id_train'][-self._max_seq_len :]
|
148 |
+
next_items = sample['item_id_valid']
|
149 |
+
|
150 |
+
return {
|
151 |
+
'user.ids': [sample['uid']],
|
152 |
+
'user.length': 1,
|
153 |
+
'item.ids': item_sequence,
|
154 |
+
'item.length': len(item_sequence),
|
155 |
+
'labels.ids': next_items,
|
156 |
+
'labels.length': len(next_items),
|
157 |
+
}
|
158 |
+
|
159 |
+
|
160 |
+
def collate_fn(batch: List[Dict]) -> Dict[str, torch.Tensor]:
|
161 |
+
"""
|
162 |
+
Collates a batch of samples into batched tensors suitable for model input.
|
163 |
+
|
164 |
+
This function processes a list of dictionaries, each containing keys like '{prefix}.ids'
|
165 |
+
and '{prefix}.length' (the length of the sequence for that prefix). For each such prefix, it:
|
166 |
+
- Concatenates all '{prefix}.ids' lists from the batch into a single flat list.
|
167 |
+
- Collects all '{prefix}.length' values into a list.
|
168 |
+
- Converts the resulting lists into torch.LongTensor objects.
|
169 |
+
|
170 |
+
Args:
|
171 |
+
batch (List[Dict]): List of sample dictionaries. Each sample must contain keys of the form
|
172 |
+
'{prefix}.ids' (list of ints) and '{prefix}.length' (int).
|
173 |
+
|
174 |
+
Returns:
|
175 |
+
Dict[str, torch.Tensor]: Dictionary with keys '{prefix}.ids' and '{prefix}.length' for each prefix,
|
176 |
+
where values are 1D torch.LongTensor objects suitable for model input.
|
177 |
+
"""
|
178 |
+
processed_batch = {}
|
179 |
+
for key in batch[0].keys():
|
180 |
+
if key.endswith('.ids'):
|
181 |
+
prefix = key.split('.')[0]
|
182 |
+
assert '{}.length'.format(prefix) in batch[0]
|
183 |
+
|
184 |
+
processed_batch[f'{prefix}.ids'] = []
|
185 |
+
processed_batch[f'{prefix}.length'] = []
|
186 |
+
|
187 |
+
for sample in batch:
|
188 |
+
processed_batch[f'{prefix}.ids'].extend(sample[f'{prefix}.ids'])
|
189 |
+
processed_batch[f'{prefix}.length'].append(sample[f'{prefix}.length'])
|
190 |
+
|
191 |
+
for part, values in processed_batch.items():
|
192 |
+
processed_batch[part] = torch.tensor(values, dtype=torch.long)
|
193 |
+
|
194 |
+
return processed_batch
|
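To make the `{prefix}.ids` / `{prefix}.length` convention concrete, here is a tiny illustrative call (assuming `collate_fn` is imported from this module):

```python
from data import collate_fn

batch = [
    {"item.ids": [5, 7, 9], "item.length": 3},
    {"item.ids": [2], "item.length": 1},
]
out = collate_fn(batch)
print(out["item.ids"])     # tensor([5, 7, 9, 2]) -- ragged sequences flattened
print(out["item.length"])  # tensor([3, 1])       -- per-sample lengths
```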
benchmarks/models/sasrec/eval.py
ADDED
@@ -0,0 +1,127 @@
1 |
+
import logging
|
2 |
+
from pathlib import Path
|
3 |
+
import random
|
4 |
+
|
5 |
+
import click
|
6 |
+
import numpy as np
|
7 |
+
import polars as pl
|
8 |
+
import torch
|
9 |
+
from model import SASRecEncoder
|
10 |
+
from torch.utils.data import DataLoader
|
11 |
+
|
12 |
+
from data import Data, EvalDataset, collate_fn, preprocess
|
13 |
+
from yambda.evaluation.metrics import calc_metrics
|
14 |
+
from yambda.evaluation.ranking import Embeddings, Targets, rank_items
|
15 |
+
|
16 |
+
|
17 |
+
logging.basicConfig(
|
18 |
+
level=logging.DEBUG, format='[%(asctime)s] [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'
|
19 |
+
)
|
20 |
+
logger = logging.getLogger(__name__)
|
21 |
+
|
22 |
+
|
23 |
+
def infer_users(eval_dataloader: DataLoader, model: torch.nn.Module, device: str):
|
24 |
+
user_ids = []
|
25 |
+
user_embeddings = []
|
26 |
+
|
27 |
+
model.eval()
|
28 |
+
for batch in eval_dataloader:
|
29 |
+
for key in batch.keys():
|
30 |
+
batch[key] = batch[key].to(device)
|
31 |
+
|
32 |
+
user_ids.append(batch['user.ids']) # (batch_size)
|
33 |
+
user_embeddings.append(model(batch)) # (batch_size, embedding_dim)
|
34 |
+
|
35 |
+
return torch.cat(user_ids, dim=0), torch.cat(user_embeddings, dim=0)
|
36 |
+
|
37 |
+
|
38 |
+
def infer_items(model: SASRecEncoder):
|
39 |
+
return model.item_embeddings.weight.data
|
40 |
+
|
41 |
+
|
42 |
+
@click.command()
|
43 |
+
@click.option('--exp_name', required=True, type=str)
|
44 |
+
@click.option('--data_dir', required=True, type=str, default='../../data/', show_default=True)
|
45 |
+
@click.option(
|
46 |
+
'--size',
|
47 |
+
required=True,
|
48 |
+
type=click.Choice(['50m', '500m', '5b']),
|
49 |
+
default='50m',
|
50 |
+
show_default=True,
|
51 |
+
)
|
52 |
+
@click.option(
|
53 |
+
'--interaction',
|
54 |
+
required=True,
|
55 |
+
type=click.Choice(['likes', 'listens']),
|
56 |
+
default='likes',
|
57 |
+
show_default=True,
|
58 |
+
)
|
59 |
+
@click.option('--batch_size', required=True, type=int, default=256, show_default=True)
|
60 |
+
@click.option('--max_seq_len', required=False, type=int, default=200, show_default=True)
|
61 |
+
@click.option('--seed', required=False, type=int, default=42, show_default=True)
|
62 |
+
@click.option('--device', required=True, type=str, default='cuda:0', show_default=True)
|
63 |
+
def main(
|
64 |
+
exp_name: str,
|
65 |
+
data_dir: str,
|
66 |
+
size: str,
|
67 |
+
interaction: str,
|
68 |
+
batch_size: int,
|
69 |
+
max_seq_len: int,
|
70 |
+
seed: int,
|
71 |
+
device: str,
|
72 |
+
):
|
73 |
+
random.seed(seed)
|
74 |
+
np.random.seed(seed)
|
75 |
+
torch.manual_seed(seed)
|
76 |
+
torch.cuda.manual_seed_all(seed)
|
77 |
+
torch.set_float32_matmul_precision('high')
|
78 |
+
|
79 |
+
    path = Path(data_dir) / 'sequential' / size / interaction
|
80 |
+
df = pl.scan_parquet(path.with_suffix('.parquet'))
|
81 |
+
|
82 |
+
logger.debug('Preprocessing data...')
|
83 |
+
data: Data = preprocess(df, interaction, val_size=0, max_seq_len=max_seq_len)
|
84 |
+
train_df = data.train.collect(engine="streaming")
|
85 |
+
eval_df = data.test.collect(engine="streaming")
|
86 |
+
logger.debug('Preprocessing data has finished!')
|
87 |
+
|
88 |
+
eval_df = train_df.join(eval_df, on='uid', how='inner', suffix='_valid').select(
|
89 |
+
pl.col('uid'), pl.col('item_id').alias('item_id_train'), pl.col('item_id_valid')
|
90 |
+
)
|
91 |
+
eval_dataset = EvalDataset(dataset=eval_df, max_seq_len=max_seq_len)
|
92 |
+
|
93 |
+
eval_dataloader = DataLoader(
|
94 |
+
dataset=eval_dataset,
|
95 |
+
batch_size=batch_size,
|
96 |
+
collate_fn=collate_fn,
|
97 |
+
drop_last=False,
|
98 |
+
shuffle=True,
|
99 |
+
)
|
100 |
+
|
101 |
+
model = torch.load(f'./checkpoints/{exp_name}_best_state.pth', weights_only=False).to(device)
|
102 |
+
model.eval()
|
103 |
+
with torch.inference_mode():
|
104 |
+
user_ids, user_embeddings = infer_users(eval_dataloader=eval_dataloader, model=model, device=device)
|
105 |
+
|
106 |
+
item_embeddings = infer_items(model=model)
|
107 |
+
|
108 |
+
item_embeddings = Embeddings(
|
109 |
+
ids=torch.arange(start=0, end=item_embeddings.shape[0], device=device), embeddings=item_embeddings
|
110 |
+
)
|
111 |
+
user_embeddings = Embeddings(ids=user_ids, embeddings=user_embeddings)
|
112 |
+
|
113 |
+
df_user_ids = torch.tensor(eval_df['uid'].to_list(), dtype=torch.long, device=device)
|
114 |
+
df_target_ids = [
|
115 |
+
torch.tensor(item_ids, dtype=torch.long, device=device) for item_ids in eval_df['item_id_valid'].to_list()
|
116 |
+
]
|
117 |
+
targets = Targets(user_ids=df_user_ids, item_ids=df_target_ids)
|
118 |
+
with torch.no_grad():
|
119 |
+
ranked = rank_items(users=user_embeddings, items=item_embeddings, num_items=100)
|
120 |
+
|
121 |
+
metric_names = [f'{name}@{k}' for name in ["recall", "ndcg", "coverage"] for k in [10, 50, 100]]
|
122 |
+
metrics = calc_metrics(ranked, targets, metrics=metric_names)
|
123 |
+
print(metrics)
|
124 |
+
|
125 |
+
|
126 |
+
if __name__ == '__main__':
|
127 |
+
main()
|
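As a sanity check of the flow above, here is a toy version with random embeddings (all sizes are arbitrary; `rank_items` is called exactly as in `main()` and is assumed to return the top `num_items` item ids per user):

```python
import torch

from yambda.evaluation.metrics import calc_metrics
from yambda.evaluation.ranking import Embeddings, Targets, rank_items

dim, num_users, num_items = 8, 3, 20
users = Embeddings(ids=torch.arange(num_users), embeddings=torch.randn(num_users, dim))
items = Embeddings(ids=torch.arange(num_items), embeddings=torch.randn(num_items, dim))

ranked = rank_items(users=users, items=items, num_items=10)  # top-10 item ids per user
targets = Targets(
    user_ids=torch.arange(num_users),
    item_ids=[torch.randint(0, num_items, (2,)) for _ in range(num_users)],
)
print(calc_metrics(ranked, targets, metrics=["recall@10", "ndcg@10"]))
```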
benchmarks/models/sasrec/model.py
ADDED
@@ -0,0 +1,270 @@
1 |
+
from typing import Dict, Tuple
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import torch.nn as nn
|
5 |
+
|
6 |
+
|
7 |
+
def create_masked_tensor(data: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
|
8 |
+
"""
|
9 |
+
Converts a batch of variable-length sequences into a padded tensor and corresponding mask.
|
10 |
+
|
11 |
+
Args:
|
12 |
+
data (torch.Tensor): Input tensor containing flattened sequences.
|
13 |
+
- For indices: shape (total_elements,) of dtype long
|
14 |
+
- For embeddings: shape (total_elements, embedding_dim)
|
15 |
+
lengths (torch.Tensor): 1D tensor of sequence lengths, shape (batch_size,)
|
16 |
+
|
17 |
+
Returns:
|
18 |
+
Tuple[torch.Tensor, torch.Tensor]:
|
19 |
+
- padded_tensor: Padded tensor of shape:
|
20 |
+
- (batch_size, max_seq_len) for indices
|
21 |
+
- (batch_size, max_seq_len, embedding_dim) for embeddings
|
22 |
+
- mask: Boolean mask of shape (batch_size, max_seq_len) where True indicates valid elements
|
23 |
+
|
24 |
+
Note:
|
25 |
+
- Zero-padding is added to the right of shorter sequences
|
26 |
+
"""
|
27 |
+
batch_size = lengths.shape[0]
|
28 |
+
max_sequence_length = int(lengths.max().item())
|
29 |
+
|
30 |
+
if len(data.shape) == 1: # indices
|
31 |
+
padded_tensor = torch.zeros(
|
32 |
+
batch_size, max_sequence_length, dtype=data.dtype, device=data.device
|
33 |
+
) # (batch_size, max_seq_len)
|
34 |
+
else:
|
35 |
+
assert len(data.shape) == 2 # embeddings
|
36 |
+
padded_tensor = torch.zeros(
|
37 |
+
batch_size, max_sequence_length, *data.shape[1:], dtype=data.dtype, device=data.device
|
38 |
+
) # (batch_size, max_seq_len, embedding_dim)
|
39 |
+
|
40 |
+
mask = (
|
41 |
+
torch.arange(end=max_sequence_length, device=lengths.device)[None] < lengths[:, None]
|
42 |
+
) # (batch_size, max_seq_len)
|
43 |
+
|
44 |
+
padded_tensor[mask] = data
|
45 |
+
|
46 |
+
return padded_tensor, mask
|
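A small worked example of `create_masked_tensor` (toy indices; run inside this module or after `from model import create_masked_tensor`): three flattened sequences of lengths 3, 1 and 2 become a right-padded `(3, 3)` tensor plus its validity mask.

```python
import torch

data = torch.tensor([11, 12, 13, 21, 31, 32])  # flattened sequences [11, 12, 13], [21], [31, 32]
lengths = torch.tensor([3, 1, 2])

padded, mask = create_masked_tensor(data, lengths)
# padded -> [[11, 12, 13],
#            [21,  0,  0],
#            [31, 32,  0]]
# mask   -> [[True, True,  True],
#            [True, False, False],
#            [True, True,  False]]
```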
47 |
+
|
48 |
+
|
49 |
+
class SASRecEncoder(nn.Module):
|
50 |
+
def __init__(
|
51 |
+
self,
|
52 |
+
num_items: int,
|
53 |
+
max_sequence_length: int,
|
54 |
+
embedding_dim: int,
|
55 |
+
num_heads: int,
|
56 |
+
num_layers: int,
|
57 |
+
dim_feedforward: int | None = None,
|
58 |
+
dropout: float = 0.0,
|
59 |
+
activation: nn.Module = nn.GELU(),
|
60 |
+
layer_norm_eps: float = 1e-9,
|
61 |
+
initializer_range: float = 0.02,
|
62 |
+
) -> None:
|
63 |
+
super().__init__()
|
64 |
+
self._num_items = num_items
|
65 |
+
self._num_heads = num_heads
|
66 |
+
self._embedding_dim = embedding_dim
|
67 |
+
|
68 |
+
self._item_embeddings = nn.Embedding(
|
69 |
+
num_embeddings=num_items + 1, # add zero id embedding
|
70 |
+
embedding_dim=embedding_dim,
|
71 |
+
)
|
72 |
+
self._position_embeddings = nn.Embedding(num_embeddings=max_sequence_length, embedding_dim=embedding_dim)
|
73 |
+
|
74 |
+
self._layernorm = nn.LayerNorm(embedding_dim, eps=layer_norm_eps)
|
75 |
+
self._dropout = nn.Dropout(dropout)
|
76 |
+
|
77 |
+
transformer_encoder_layer = nn.TransformerEncoderLayer(
|
78 |
+
d_model=embedding_dim,
|
79 |
+
nhead=num_heads,
|
80 |
+
dim_feedforward=dim_feedforward or 4 * embedding_dim,
|
81 |
+
dropout=dropout,
|
82 |
+
activation=activation,
|
83 |
+
layer_norm_eps=layer_norm_eps,
|
84 |
+
batch_first=True,
|
85 |
+
)
|
86 |
+
self._encoder = nn.TransformerEncoder(transformer_encoder_layer, num_layers)
|
87 |
+
|
88 |
+
self._init_weights(initializer_range)
|
89 |
+
|
90 |
+
@property
|
91 |
+
def item_embeddings(self) -> nn.Module:
|
92 |
+
return self._item_embeddings
|
93 |
+
|
94 |
+
@property
|
95 |
+
def num_items(self) -> int:
|
96 |
+
return self._num_items
|
97 |
+
|
98 |
+
def _apply_sequential_encoder(self, events: torch.Tensor, lengths: torch.Tensor):
|
99 |
+
"""
|
100 |
+
Processes variable-length event sequences through a transformer encoder with positional embeddings.
|
101 |
+
|
102 |
+
Args:
|
103 |
+
events (torch.Tensor): Flattened tensor of event indices, shape (total_events,)
|
104 |
+
lengths (torch.Tensor): 1D tensor of sequence lengths, shape (batch_size,)
|
105 |
+
|
106 |
+
Returns:
|
107 |
+
Tuple[torch.Tensor, torch.Tensor]:
|
108 |
+
- embeddings: Processed sequence embeddings, shape (batch_size, seq_len, embedding_dim)
|
109 |
+
- mask: Boolean mask indicating valid elements, shape (batch_size, seq_len)
|
110 |
+
|
111 |
+
Processing Steps:
|
112 |
+
1. Embedding Lookup:
|
113 |
+
- Converts event indices to dense embeddings
|
114 |
+
2. Positional Encoding:
|
115 |
+
- Generates reverse-order positions (newest event first)
|
116 |
+
- Adds positional embeddings to item embeddings
|
117 |
+
3. Transformer Processing:
|
118 |
+
- Applies layer norm and dropout
|
119 |
+
- Uses causal attention mask for autoregressive modeling
|
120 |
+
- Uses padding mask to ignore invalid positions
|
121 |
+
|
122 |
+
Note:
|
123 |
+
- Position indices are generated in reverse chronological order (newest event = position 0)
|
124 |
+
"""
|
125 |
+
embeddings = self._item_embeddings(events) # (total_batch_events, embedding_dim)
|
126 |
+
|
127 |
+
embeddings, mask = create_masked_tensor(
|
128 |
+
data=embeddings, lengths=lengths
|
129 |
+
) # (batch_size, seq_len, embedding_dim), (batch_size, seq_len)
|
130 |
+
|
131 |
+
batch_size = mask.shape[0]
|
132 |
+
seq_len = mask.shape[1]
|
133 |
+
|
134 |
+
positions = (
|
135 |
+
torch.arange(start=seq_len - 1, end=-1, step=-1, device=mask.device)[None].tile([batch_size, 1]).long()
|
136 |
+
) # (batch_size, seq_len)
|
137 |
+
positions_mask = positions < lengths[:, None] # (batch_size, max_seq_len)
|
138 |
+
|
139 |
+
positions = positions[positions_mask] # (total_batch_events)
|
140 |
+
position_embeddings = self._position_embeddings(positions) # (total_batch_events, embedding_dim)
|
141 |
+
position_embeddings, _ = create_masked_tensor(
|
142 |
+
data=position_embeddings, lengths=lengths
|
143 |
+
) # (batch_size, seq_len, embedding_dim)
|
144 |
+
|
145 |
+
embeddings = embeddings + position_embeddings # (batch_size, seq_len, embedding_dim)
|
146 |
+
embeddings = self._layernorm(embeddings) # (batch_size, seq_len, embedding_dim)
|
147 |
+
embeddings = self._dropout(embeddings) # (batch_size, seq_len, embedding_dim)
|
148 |
+
embeddings[~mask] = 0
|
149 |
+
|
150 |
+
causal_mask = torch.tril(torch.ones(seq_len, seq_len)).bool().to(mask.device) # (seq_len, seq_len)
|
151 |
+
embeddings = self._encoder(
|
152 |
+
src=embeddings, mask=~causal_mask, src_key_padding_mask=~mask
|
153 |
+
) # (batch_size, seq_len, embedding_dim)
|
154 |
+
|
155 |
+
return embeddings, mask
|
156 |
+
|
157 |
+
@torch.no_grad()
|
158 |
+
def _init_weights(self, initializer_range: float) -> None:
|
159 |
+
"""
|
160 |
+
Initialize all model parameters (weights and biases) in-place.
|
161 |
+
|
162 |
+
For each parameter in the model:
|
163 |
+
- If the parameter name contains 'weight':
|
164 |
+
- If it also contains 'norm' (e.g., for normalization layers), initialize with ones.
|
165 |
+
- Otherwise, initialize with a truncated normal distribution (mean=0, std=initializer_range)
|
166 |
+
and values clipped to the range [-2 * initializer_range, 2 * initializer_range].
|
167 |
+
- If the parameter name contains 'bias', initialize with zeros.
|
168 |
+
- If the parameter name does not match either case, raise a ValueError.
|
169 |
+
|
170 |
+
Args:
|
171 |
+
initializer_range (float): Standard deviation for the truncated normal distribution
|
172 |
+
used to initialize non-normalization weights.
|
173 |
+
|
174 |
+
Note:
|
175 |
+
This method should be called during model initialization to ensure all weights and biases
|
176 |
+
are properly set. It runs in a no-grad context and does not track gradients.
|
177 |
+
"""
|
178 |
+
for key, value in self.named_parameters():
|
179 |
+
if 'weight' in key:
|
180 |
+
if 'norm' in key:
|
181 |
+
nn.init.ones_(value.data)
|
182 |
+
else:
|
183 |
+
nn.init.trunc_normal_(
|
184 |
+
value.data, std=initializer_range, a=-2 * initializer_range, b=2 * initializer_range
|
185 |
+
)
|
186 |
+
else:
|
187 |
+
assert 'bias' in key
|
188 |
+
nn.init.zeros_(value.data)
|
189 |
+
|
190 |
+
@staticmethod
|
191 |
+
def _get_last_embedding(embeddings: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
|
192 |
+
"""
|
193 |
+
Extracts the embedding of the last valid (non-padded) element from each sequence in a batch.
|
194 |
+
|
195 |
+
Args:
|
196 |
+
embeddings (torch.Tensor): Tensor of shape (batch_size, seq_len, embedding_dim)
|
197 |
+
containing embeddings for each element in each sequence.
|
198 |
+
mask (torch.Tensor): Boolean tensor of shape (batch_size, seq_len) indicating
|
199 |
+
valid (True) and padded (False) positions in each sequence.
|
200 |
+
|
201 |
+
Returns:
|
202 |
+
torch.Tensor: Tensor of shape (batch_size, embedding_dim) containing the embedding
|
203 |
+
of the last valid element for each sequence in the batch.
|
204 |
+
"""
|
205 |
+
flatten_embeddings = embeddings[mask] # (total_batch_events, embedding_dim)
|
206 |
+
lengths = torch.sum(mask, dim=-1) # (batch_size)
|
207 |
+
offsets = torch.cumsum(lengths, dim=0) # (batch_size)
|
208 |
+
last_embeddings = flatten_embeddings[offsets.long() - 1] # (batch_size, embedding_dim)
|
209 |
+
return last_embeddings
|
210 |
+
|
211 |
+
def forward(self, inputs: Dict) -> torch.Tensor:
|
212 |
+
"""
|
213 |
+
Forward pass of the model, handling both training and evaluation modes.
|
214 |
+
|
215 |
+
Args:
|
216 |
+
inputs (Dict): Input dictionary containing:
|
217 |
+
- 'item.ids' (torch.LongTensor): Flattened tensor of item IDs for all sequences in the batch.
|
218 |
+
Shape: (total_batch_events,)
|
219 |
+
- 'item.length' (torch.LongTensor): Sequence lengths for each sample in the batch.
|
220 |
+
Shape: (batch_size,)
|
221 |
+
- 'positive.ids' (torch.LongTensor, training only): Positive sample IDs for contrastive learning.
|
222 |
+
Shape: (total_batch_events,)
|
223 |
+
- 'negative.ids' (torch.LongTensor, training only): Negative sample IDs for contrastive learning.
|
224 |
+
Shape: (total_batch_events,)
|
225 |
+
|
226 |
+
Returns:
|
227 |
+
torch.Tensor:
|
228 |
+
- During training: Binary cross-entropy loss between positive/negative sample scores.
|
229 |
+
Shape: (1,)
|
230 |
+
- During evaluation: Embeddings of the last valid item in each sequence.
|
231 |
+
Shape: (batch_size, embedding_dim)
|
232 |
+
"""
|
233 |
+
all_sample_events = inputs['item.ids'] # (total_batch_events)
|
234 |
+
all_sample_lengths = inputs['item.length'] # (batch_size)
|
235 |
+
|
236 |
+
embeddings, mask = self._apply_sequential_encoder(
|
237 |
+
all_sample_events, all_sample_lengths
|
238 |
+
) # (batch_size, seq_len, embedding_dim), (batch_size, seq_len)
|
239 |
+
|
240 |
+
if self.training: # training mode
|
241 |
+
# queries
|
242 |
+
in_batch_queries_embeddings = embeddings[mask] # (total_batch_events, embedding_dim)
|
243 |
+
|
244 |
+
# positives
|
245 |
+
in_batch_positive_events = inputs['positive.ids'] # (total_batch_events)
|
246 |
+
in_batch_positive_embeddings = self._item_embeddings(
|
247 |
+
in_batch_positive_events
|
248 |
+
) # (total_batch_events, embedding_dim)
|
249 |
+
positive_scores = torch.einsum(
|
250 |
+
'bd,bd->b', in_batch_queries_embeddings, in_batch_positive_embeddings
|
251 |
+
) # (total_batch_events)
|
252 |
+
|
253 |
+
# negatives
|
254 |
+
in_batch_negative_events = inputs['negative.ids'] # (total_batch_events)
|
255 |
+
in_batch_negative_embeddings = self._item_embeddings(
|
256 |
+
in_batch_negative_events
|
257 |
+
) # (total_batch_events, embedding_dim)
|
258 |
+
negative_scores = torch.einsum(
|
259 |
+
'bd,bd->b', in_batch_queries_embeddings, in_batch_negative_embeddings
|
260 |
+
) # (total_batch_events)
|
261 |
+
|
262 |
+
loss = nn.functional.binary_cross_entropy_with_logits(
|
263 |
+
torch.cat([positive_scores, negative_scores], dim=0),
|
264 |
+
torch.cat([torch.ones_like(positive_scores), torch.zeros_like(negative_scores)]),
|
265 |
+
) # (1)
|
266 |
+
|
267 |
+
return loss
|
268 |
+
else: # eval mode
|
269 |
+
last_embeddings = self._get_last_embedding(embeddings, mask) # (batch_size, embedding_dim)
|
270 |
+
return last_embeddings
|
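A hypothetical eval-mode smoke test for `SASRecEncoder` with toy sizes (none of these hyperparameters come from the benchmark configs): the forward pass returns one embedding per input sequence.

```python
import torch

from model import SASRecEncoder

model = SASRecEncoder(num_items=100, max_sequence_length=20, embedding_dim=16, num_heads=2, num_layers=1)
model.eval()

batch = {
    "item.ids": torch.tensor([1, 2, 3, 4, 5]),  # two flattened sequences: [1, 2, 3] and [4, 5]
    "item.length": torch.tensor([3, 2]),
}
with torch.inference_mode():
    user_embeddings = model(batch)
print(user_embeddings.shape)  # torch.Size([2, 16])
```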
benchmarks/models/sasrec/train.py
ADDED
@@ -0,0 +1,150 @@
1 |
+
import logging
|
2 |
+
import os
|
3 |
+
from pathlib import Path
|
4 |
+
import random
|
5 |
+
|
6 |
+
import click
|
7 |
+
import numpy as np
|
8 |
+
import polars as pl
|
9 |
+
import torch
|
10 |
+
from model import SASRecEncoder
|
11 |
+
from torch.utils.data import DataLoader
|
12 |
+
|
13 |
+
from data import Data, TrainDataset, collate_fn, preprocess
|
14 |
+
|
15 |
+
|
16 |
+
logging.basicConfig(
|
17 |
+
level=logging.DEBUG, format='[%(asctime)s] [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'
|
18 |
+
)
|
19 |
+
logger = logging.getLogger(__name__)
|
20 |
+
|
21 |
+
|
22 |
+
def train(
|
23 |
+
train_dataloader: DataLoader,
|
24 |
+
model: SASRecEncoder,
|
25 |
+
optimizer: torch.optim.Optimizer,
|
26 |
+
device: str = 'cpu',
|
27 |
+
num_epochs: int = 100,
|
28 |
+
):
|
29 |
+
logger.debug('Start training...')
|
30 |
+
|
31 |
+
model.train()
|
32 |
+
|
33 |
+
for epoch_num in range(num_epochs):
|
34 |
+
logger.debug(f'Start epoch {epoch_num + 1}')
|
35 |
+
for batch in train_dataloader:
|
36 |
+
for key in batch.keys():
|
37 |
+
batch[key] = batch[key].to(device)
|
38 |
+
|
39 |
+
loss = model(batch)
|
40 |
+
|
41 |
+
optimizer.zero_grad()
|
42 |
+
loss.backward()
|
43 |
+
optimizer.step()
|
44 |
+
|
45 |
+
logger.debug('Training procedure has been finished!')
|
46 |
+
return model.state_dict()
|
47 |
+
|
48 |
+
|
49 |
+
@click.command()
|
50 |
+
@click.option('--exp_name', required=True, type=str)
|
51 |
+
@click.option('--data_dir', required=True, type=str, default='../../data/', show_default=True)
|
52 |
+
@click.option('--checkpoint_dir', required=True, type=str, default='./checkpoints/', show_default=True)
|
53 |
+
@click.option(
|
54 |
+
'--size',
|
55 |
+
required=True,
|
56 |
+
type=click.Choice(['50m', '500m', '5b']),
|
57 |
+
default='50m',
|
58 |
+
show_default=True,
|
59 |
+
)
|
60 |
+
@click.option(
|
61 |
+
'--interaction',
|
62 |
+
required=True,
|
63 |
+
type=click.Choice(['likes', 'listens']),
|
64 |
+
default='likes',
|
65 |
+
show_default=True,
|
66 |
+
)
|
67 |
+
@click.option('--batch_size', required=True, type=int, default=256, show_default=True)
|
68 |
+
@click.option('--max_seq_len', required=False, type=int, default=200, show_default=True)
|
69 |
+
@click.option('--embedding_dim', required=False, type=int, default=64, show_default=True)
|
70 |
+
@click.option('--num_heads', required=False, type=int, default=2, show_default=True)
|
71 |
+
@click.option('--num_layers', required=False, type=int, default=2, show_default=True)
|
72 |
+
@click.option('--learning_rate', required=False, type=float, default=1e-3, show_default=True)
|
73 |
+
@click.option('--dropout', required=False, type=float, default=0.0, show_default=True)
|
74 |
+
@click.option('--seed', required=False, type=int, default=42, show_default=True)
|
75 |
+
@click.option('--device', required=True, type=str, default='cuda:0', show_default=True)
|
76 |
+
@click.option('--num_epochs', required=True, type=int, default=100, show_default=True)
|
77 |
+
def main(
|
78 |
+
exp_name: str,
|
79 |
+
data_dir: str,
|
80 |
+
checkpoint_dir: str,
|
81 |
+
size: str,
|
82 |
+
interaction: str,
|
83 |
+
batch_size: int,
|
84 |
+
max_seq_len: int,
|
85 |
+
embedding_dim: int,
|
86 |
+
num_heads: int,
|
87 |
+
num_layers: int,
|
88 |
+
learning_rate: float,
|
89 |
+
dropout: float,
|
90 |
+
seed: int,
|
91 |
+
device: str,
|
92 |
+
num_epochs: int,
|
93 |
+
):
|
94 |
+
random.seed(seed)
|
95 |
+
np.random.seed(seed)
|
96 |
+
torch.manual_seed(seed)
|
97 |
+
torch.cuda.manual_seed_all(seed)
|
98 |
+
torch.set_float32_matmul_precision('high')
|
99 |
+
|
100 |
+
    data_path = Path(data_dir) / 'sequential' / size / interaction
|
101 |
+
df = pl.scan_parquet(data_path.with_suffix('.parquet'))
|
102 |
+
|
103 |
+
    checkpoint_path = Path(checkpoint_dir) / f'{exp_name}_best_state.pth'
|
104 |
+
os.makedirs(checkpoint_dir, exist_ok=True)
|
105 |
+
|
106 |
+
logger.debug('Preprocessing data...')
|
107 |
+
data: Data = preprocess(df, interaction, val_size=0, max_seq_len=max_seq_len)
|
108 |
+
train_df = data.train.collect(engine="streaming")
|
109 |
+
logger.debug('Preprocessing data has finished!')
|
110 |
+
|
111 |
+
train_dataset = TrainDataset(dataset=train_df, num_items=data.num_items, max_seq_len=max_seq_len)
|
112 |
+
|
113 |
+
train_dataloader = DataLoader(
|
114 |
+
dataset=train_dataset,
|
115 |
+
batch_size=batch_size,
|
116 |
+
collate_fn=collate_fn,
|
117 |
+
drop_last=True,
|
118 |
+
shuffle=True,
|
119 |
+
num_workers=3,
|
120 |
+
prefetch_factor=10,
|
121 |
+
pin_memory_device="cuda",
|
122 |
+
pin_memory=True,
|
123 |
+
)
|
124 |
+
|
125 |
+
model = SASRecEncoder(
|
126 |
+
num_items=data.num_items,
|
127 |
+
max_sequence_length=max_seq_len,
|
128 |
+
embedding_dim=embedding_dim,
|
129 |
+
num_heads=num_heads,
|
130 |
+
num_layers=num_layers,
|
131 |
+
dropout=dropout,
|
132 |
+
).to(device)
|
133 |
+
|
134 |
+
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
|
135 |
+
|
136 |
+
best_checkpoint = train(
|
137 |
+
train_dataloader=train_dataloader, model=model, optimizer=optimizer, device=device, num_epochs=num_epochs
|
138 |
+
)
|
139 |
+
|
140 |
+
logger.debug('Saving model...')
|
141 |
+
|
142 |
+
os.makedirs(checkpoint_dir, exist_ok=True)
|
143 |
+
|
144 |
+
model.load_state_dict(best_checkpoint)
|
145 |
+
torch.save(model, checkpoint_path)
|
146 |
+
logger.debug(f'Saved model as {checkpoint_path}')
|
147 |
+
|
148 |
+
|
149 |
+
if __name__ == '__main__':
|
150 |
+
main()
|
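For reference, a minimal end-to-end training sketch on toy data (two users, arbitrary hyperparameters), showing how `TrainDataset`, `collate_fn` and `SASRecEncoder` fit together outside the CLI:

```python
import polars as pl
import torch
from torch.utils.data import DataLoader

from data import TrainDataset, collate_fn
from model import SASRecEncoder

train_df = pl.DataFrame({"uid": [1, 2], "item_id": [[1, 2, 3, 4], [2, 3]]})
dataset = TrainDataset(dataset=train_df, num_items=10, max_seq_len=5)
loader = DataLoader(dataset, batch_size=2, collate_fn=collate_fn)

model = SASRecEncoder(num_items=10, max_sequence_length=5, embedding_dim=8, num_heads=2, num_layers=1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

model.train()
for batch in loader:
    loss = model(batch)  # train mode returns the BCE loss over positive/negative scores
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(float(loss))
```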
benchmarks/pyproject.toml
ADDED
@@ -0,0 +1,44 @@
1 |
+
[project]
|
2 |
+
name = "yambda"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = ""
|
5 |
+
authors = [
|
6 |
+
{name = "Yandex"}
|
7 |
+
]
|
8 |
+
readme = "README.md"
|
9 |
+
requires-python = ">=3.11"
|
10 |
+
dependencies = [
|
11 |
+
"torch",
|
12 |
+
"polars-u64-idx==1.28.1", # https://github.com/pola-rs/polars/issues/22741
|
13 |
+
"click",
|
14 |
+
"scipy",
|
15 |
+
"numpy",
|
16 |
+
"optuna",
|
17 |
+
"tqdm",
|
18 |
+
"matplotlib"
|
19 |
+
]
|
20 |
+
|
21 |
+
|
22 |
+
[build-system]
|
23 |
+
requires = ["poetry-core>=2.0.0,<3.0.0"]
|
24 |
+
build-backend = "poetry.core.masonry.api"
|
25 |
+
|
26 |
+
|
27 |
+
[tool.ruff]
|
28 |
+
line-length = 120
|
29 |
+
|
30 |
+
[tool.ruff.lint]
|
31 |
+
# list of all codes - https://docs.astral.sh/ruff/rules/
|
32 |
+
select = [
|
33 |
+
"E", # pycodestyle errors
|
34 |
+
"F", # pyflakes
|
35 |
+
"W", # pycodestyle warnings
|
36 |
+
"I", # isort
|
37 |
+
]
|
38 |
+
|
39 |
+
[tool.ruff.lint.isort]
|
40 |
+
lines-after-imports = 2
|
41 |
+
known-first-party = ["yambda"]
|
42 |
+
|
43 |
+
[tool.ruff.format]
|
44 |
+
quote-style = "preserve"
|
benchmarks/scripts/get_dataset_stats.py
ADDED
@@ -0,0 +1,210 @@
1 |
+
import enum
|
2 |
+
import os
|
3 |
+
from pathlib import Path
|
4 |
+
|
5 |
+
import click
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
import polars as pl
|
8 |
+
|
9 |
+
|
10 |
+
class ImageFormat(enum.Enum):
|
11 |
+
PNG = "png"
|
12 |
+
JPEG = "jpg"
|
13 |
+
|
14 |
+
|
15 |
+
@click.command()
|
16 |
+
@click.option(
|
17 |
+
"--src_dir",
|
18 |
+
type=click.Path(exists=True, file_okay=False),
|
19 |
+
required=True,
|
20 |
+
help="Path to the directory containing dataset, e.g., './data'.",
|
21 |
+
)
|
22 |
+
@click.option(
|
23 |
+
"--dst_dir",
|
24 |
+
type=click.Path(file_okay=False),
|
25 |
+
required=False,
|
26 |
+
help="Path to the directory where statisics will be saved. "
|
27 |
+
"If not specified, statistics is saved in '{src_dir}/stats'. e.g., './stats'.",
|
28 |
+
)
|
29 |
+
@click.option(
|
30 |
+
"--file_name",
|
31 |
+
type=str,
|
32 |
+
default="multi_event",
|
33 |
+
help="Base name for multi-event file. Default is 'multi_event'.",
|
34 |
+
)
|
35 |
+
def cli(src_dir: str, dst_dir: str, file_name: str):
|
36 |
+
if dst_dir is None:
|
37 |
+
dst_dir = f"{src_dir}/stats"
|
38 |
+
generate_dataset_stats(src_dir, dst_dir, file_name)
|
39 |
+
|
40 |
+
|
41 |
+
def generate_dataset_stats(src_dir: str, dst_dir: str, file_name: str):
|
42 |
+
src_dir, dst_dir = Path(src_dir), Path(dst_dir)
|
43 |
+
src_dir_flat, src_dir_seq = src_dir / "flat", src_dir / "sequential"
|
44 |
+
|
45 |
+
file_name = f"{file_name}.parquet"
|
46 |
+
sizes = sorted(path.name for path in src_dir_flat.iterdir())
|
47 |
+
for size in sizes:
|
48 |
+
path_flat, path_seq = src_dir_flat / size, src_dir_seq / size
|
49 |
+
assert path_seq.exists(), f"Cannot find sequential data in '{path_seq}'"
|
50 |
+
assert (path_flat / file_name).exists(), (
|
51 |
+
"Please, generate flat multi-event file using "
|
52 |
+
"'make_multievent.py' or specify correct name using --file_name"
|
53 |
+
)
|
54 |
+
assert (path_seq / file_name).exists(), (
|
55 |
+
"Please, generate sequential multi-event file "
|
56 |
+
"using 'transform2sequential.py' script or specify correct name "
|
57 |
+
"using --file_name"
|
58 |
+
)
|
59 |
+
|
60 |
+
print(f"Gathering stats for {size}...")
|
61 |
+
dst_dir_size = dst_dir / size
|
62 |
+
dst_dir_size.mkdir(parents=True, exist_ok=True)
|
63 |
+
|
64 |
+
df = pl.scan_parquet(path_seq / file_name)
|
65 |
+
generate_user_history_graph(df, dst_dir_size / "user_history_len.png")
|
66 |
+
generate_log_user_history_graph(df, dst_dir_size / "user_history_log_len.png")
|
67 |
+
|
68 |
+
df = pl.scan_parquet(path_flat / file_name)
|
69 |
+
generate_item_interactions_graph(df, dst_dir_size / "item_interactions.png")
|
70 |
+
get_recom_stats(df).write_csv(dst_dir_size / "recom_event_count.csv")
|
71 |
+
get_history_len_stats(df).write_csv(dst_dir_size / "event_history_len.csv")
|
72 |
+
get_dataset_stats(df).write_csv(dst_dir_size / "dataset_event_stats.csv")
|
73 |
+
|
74 |
+
|
75 |
+
def make_history_len_graph(
|
76 |
+
df: pl.DataFrame,
|
77 |
+
*,
|
78 |
+
qs: tuple[float] | None = None,
|
79 |
+
color: str = "lightskyblue",
|
80 |
+
num_bins: int = 100,
|
81 |
+
title: str | None = None,
|
82 |
+
xlabel: str | None = None,
|
83 |
+
ylabel: str | None = None,
|
84 |
+
ax: plt.Axes | None = None,
|
85 |
+
) -> plt.Axes:
|
86 |
+
if ax is None:
|
87 |
+
_, ax = plt.subplots(figsize=(12, 5))
|
88 |
+
|
89 |
+
count, _, _ = ax.hist(df["value"], bins=num_bins, ec="k", lw=1.0, color=color)
|
90 |
+
ylim = count.max() * 1.05
|
91 |
+
|
92 |
+
xs = {"max": df["value"].max()}
|
93 |
+
if qs is not None:
|
94 |
+
xs.update((f"q{q * 100:.0f}", df["value"].quantile(q)) for q in qs)
|
95 |
+
|
96 |
+
dx = 0.01 * (xs["max"] - df["value"].min())
|
97 |
+
template = "{label}={x:.3f}" if xs["max"] <= 10 else "{label}={x:.0f}"
|
98 |
+
for label, x in xs.items():
|
99 |
+
ax.plot([x, x], [0, ylim], ls="--", c="k")
|
100 |
+
text = template.format(label=label, x=x)
|
101 |
+
ax.text(x + dx, ylim // 2, text, rotation=90, fontsize=16, bbox=dict(alpha=0.1, color="r"))
|
102 |
+
|
103 |
+
if title is not None:
|
104 |
+
ax.set_title(title, fontsize=24)
|
105 |
+
ax.set_xlabel(xlabel or "Value", fontsize=22)
|
106 |
+
ax.set_ylabel(ylabel or "Count", fontsize=22)
|
107 |
+
|
108 |
+
ax.set_ylim([0, ylim])
|
109 |
+
|
110 |
+
ax.tick_params(labelsize=16)
|
111 |
+
    ax.ticklabel_format(style="sci", useMathText=True)

    return ax
|
112 |
+
|
113 |
+
|
114 |
+
def save_graph(output_path: os.PathLike, fmt: ImageFormat = ImageFormat.PNG):
|
115 |
+
output_path = Path(output_path)
|
116 |
+
if not output_path.suffix:
|
117 |
+
output_path = output_path.with_suffix(f".{fmt.value}")
|
118 |
+
|
119 |
+
if output_path.exists():
|
120 |
+
print(f"Rewriting file '{output_path}'")
|
121 |
+
else:
|
122 |
+
print(f"Saving to '{output_path}'")
|
123 |
+
|
124 |
+
plt.savefig(output_path, dpi=300, format=fmt.value)
|
125 |
+
|
126 |
+
|
127 |
+
def generate_user_history_graph(df: pl.LazyFrame, out_path: os.PathLike):
|
128 |
+
_, ax = plt.subplots(figsize=(12, 5))
|
129 |
+
|
130 |
+
make_history_len_graph(
|
131 |
+
df.select(value=pl.col("item_id").list.len()).collect(),
|
132 |
+
num_bins=100,
|
133 |
+
qs=(0.5, 0.9, 0.95),
|
134 |
+
xlabel="Events",
|
135 |
+
ylabel="Users",
|
136 |
+
ax=ax,
|
137 |
+
)
|
138 |
+
plt.tight_layout()
|
139 |
+
save_graph(out_path)
|
140 |
+
|
141 |
+
|
142 |
+
def generate_log_user_history_graph(df: pl.LazyFrame, out_path: os.PathLike):
|
143 |
+
_, ax = plt.subplots(figsize=(12, 5))
|
144 |
+
|
145 |
+
make_history_len_graph(
|
146 |
+
df.select(value=pl.col("item_id").list.len().log10()).collect(),
|
147 |
+
num_bins=40,
|
148 |
+
xlabel="$Log_{10}$(Events)",
|
149 |
+
ylabel="Users",
|
150 |
+
color="lightgreen",
|
151 |
+
ax=ax,
|
152 |
+
)
|
153 |
+
plt.tight_layout()
|
154 |
+
save_graph(out_path)
|
155 |
+
|
156 |
+
|
157 |
+
def generate_item_interactions_graph(df: pl.LazyFrame, out_path: os.PathLike):
|
158 |
+
_, ax = plt.subplots(figsize=(12, 5))
|
159 |
+
|
160 |
+
make_history_len_graph(
|
161 |
+
df.group_by("item_id").len().select(value=pl.col("len").log10()).collect(),
|
162 |
+
num_bins=30,
|
163 |
+
qs=(0.5, 0.9, 0.95),
|
164 |
+
xlabel="$Log_{10}$(Events)",
|
165 |
+
ylabel="Items",
|
166 |
+
color="orange",
|
167 |
+
ax=ax,
|
168 |
+
)
|
169 |
+
plt.tight_layout()
|
170 |
+
save_graph(out_path)
|
171 |
+
|
172 |
+
|
173 |
+
def get_recom_stats(df: pl.LazyFrame) -> pl.DataFrame:
|
174 |
+
print("Computing recom stats")
|
175 |
+
df_cnt = df.group_by(("event_type", "is_organic")).len().collect()
|
176 |
+
df_recom = df_cnt.filter(pl.col("is_organic").eq(0)).select(pl.col("event_type"), pl.col("len").alias("recom"))
|
177 |
+
df_total = df_cnt.group_by("event_type").sum().select(pl.col("event_type"), pl.col("len").alias("total"))
|
178 |
+
return df_total.join(df_recom, on="event_type").with_columns(ratio=pl.col("recom") / pl.col("total"))
|
179 |
+
|
180 |
+
|
181 |
+
def get_history_len_stats(df: pl.LazyFrame) -> pl.DataFrame:
|
182 |
+
print("Computing event history length stats")
|
183 |
+
return (
|
184 |
+
df.group_by(("event_type", "uid"))
|
185 |
+
.len()
|
186 |
+
.group_by("event_type")
|
187 |
+
.agg(
|
188 |
+
median=pl.col("len").quantile(0.5).cast(pl.Int32),
|
189 |
+
q90=pl.col("len").quantile(0.9).cast(pl.Int32),
|
190 |
+
q95=pl.col("len").quantile(0.95).cast(pl.Int32),
|
191 |
+
)
|
192 |
+
.collect()
|
193 |
+
)
|
194 |
+
|
195 |
+
|
196 |
+
def get_dataset_stats(df: pl.LazyFrame) -> pl.DataFrame:
|
197 |
+
print("Computing dataset stats")
|
198 |
+
return df.select(
|
199 |
+
users=pl.col("uid").unique().len(),
|
200 |
+
items=pl.col("item_id").unique().len(),
|
201 |
+
listens=pl.col("event_type").filter(pl.col("event_type").eq("listen")).len(),
|
202 |
+
likes=pl.col("event_type").filter(pl.col("event_type").eq("like")).len(),
|
203 |
+
dislikes=pl.col("event_type").filter(pl.col("event_type").eq("dislike")).len(),
|
204 |
+
unlikes=pl.col("event_type").filter(pl.col("event_type").eq("unlike")).len(),
|
205 |
+
undislikes=pl.col("event_type").filter(pl.col("event_type").eq("undislike")).len(),
|
206 |
+
).collect()
|
207 |
+
|
208 |
+
|
209 |
+
if __name__ == "__main__":
|
210 |
+
cli()
|
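As an illustration of what `get_history_len_stats` computes (per-event-type quantiles of per-user event counts), here is a toy call; it assumes the function is imported from this script.

```python
import polars as pl

from get_dataset_stats import get_history_len_stats

toy = pl.LazyFrame(
    {
        "uid": [1, 1, 1, 2, 2, 3],
        "event_type": ["listen", "listen", "like", "listen", "like", "listen"],
    }
)
print(get_history_len_stats(toy))  # one row per event_type with median/q90/q95 history lengths
```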
benchmarks/scripts/make_multievent.py
ADDED
@@ -0,0 +1,88 @@
1 |
+
import os
|
2 |
+
|
3 |
+
import click
|
4 |
+
import polars as pl
|
5 |
+
|
6 |
+
|
7 |
+
@click.command()
|
8 |
+
@click.option(
|
9 |
+
"--src_dir",
|
10 |
+
type=click.Path(exists=True, file_okay=False),
|
11 |
+
required=True,
|
12 |
+
help="Path to the directory containing source parquet files, e.g., './50m'.",
|
13 |
+
)
|
14 |
+
@click.option(
|
15 |
+
"--dst_dir",
|
16 |
+
type=click.Path(file_okay=False),
|
17 |
+
required=False,
|
18 |
+
help="Path to the directory where Parquet files will be saved. "
|
19 |
+
"If not specified, Parquet files are saved in 'src_dir'. e.g., './out'.",
|
20 |
+
)
|
21 |
+
@click.option(
|
22 |
+
"--file_name",
|
23 |
+
type=str,
|
24 |
+
default="multi_event",
|
25 |
+
help="Base name for the output Parquet file. Default is 'multi_event'.",
|
26 |
+
)
|
27 |
+
def cli(src_dir: str, dst_dir: str, file_name: str):
|
28 |
+
if dst_dir is None:
|
29 |
+
dst_dir = src_dir
|
30 |
+
|
31 |
+
print(f"{src_dir=}, {dst_dir=}, {file_name=}")
|
32 |
+
make_multievent_dataset(src_dir, dst_dir, file_name)
|
33 |
+
|
34 |
+
|
35 |
+
def make_multievent_dataset(src_dir: str, dst_dir: str, file_name: str):
|
36 |
+
os.makedirs(dst_dir, exist_ok=True)
|
37 |
+
|
38 |
+
dislikes = pl.scan_parquet(os.path.join(src_dir, "dislikes.parquet"))
|
39 |
+
likes = pl.scan_parquet(os.path.join(src_dir, "likes.parquet"))
|
40 |
+
listens = pl.scan_parquet(os.path.join(src_dir, "listens.parquet"))
|
41 |
+
undislikes = pl.scan_parquet(os.path.join(src_dir, "undislikes.parquet"))
|
42 |
+
unlikes = pl.scan_parquet(os.path.join(src_dir, "unlikes.parquet"))
|
43 |
+
|
44 |
+
events = pl.Enum(["listen", "dislike", "like", "undislike", "unlike"])
|
45 |
+
|
46 |
+
combined_df = pl.concat(
|
47 |
+
[
|
48 |
+
listens.with_columns(
|
49 |
+
pl.lit("listen").cast(events).alias("event_type"),
|
50 |
+
),
|
51 |
+
dislikes.with_columns(
|
52 |
+
pl.lit(None).alias("played_ratio_pct"),
|
53 |
+
pl.lit(None).alias("track_length_seconds"),
|
54 |
+
pl.lit("dislike").cast(events).alias("event_type"),
|
55 |
+
),
|
56 |
+
likes.with_columns(
|
57 |
+
pl.lit(None).alias("played_ratio_pct"),
|
58 |
+
pl.lit(None).alias("track_length_seconds"),
|
59 |
+
pl.lit("like").cast(events).alias("event_type"),
|
60 |
+
),
|
61 |
+
undislikes.with_columns(
|
62 |
+
pl.lit(None).alias("played_ratio_pct"),
|
63 |
+
pl.lit(None).alias("track_length_seconds"),
|
64 |
+
pl.lit("undislike").cast(events).alias("event_type"),
|
65 |
+
),
|
66 |
+
unlikes.with_columns(
|
67 |
+
pl.lit(None).alias("played_ratio_pct"),
|
68 |
+
pl.lit(None).alias("track_length_seconds"),
|
69 |
+
pl.lit("unlike").cast(events).alias("event_type"),
|
70 |
+
),
|
71 |
+
]
|
72 |
+
).sort(
|
73 |
+
by=[
|
74 |
+
"uid",
|
75 |
+
"timestamp",
|
76 |
+
],
|
77 |
+
maintain_order=True,
|
78 |
+
)
|
79 |
+
|
80 |
+
combined_df.with_columns(pl.col("event_type").cast(events)).sink_parquet(
|
81 |
+
os.path.join(dst_dir, file_name + ".parquet"),
|
82 |
+
compression="lz4",
|
83 |
+
statistics=True,
|
84 |
+
)
|
85 |
+
|
86 |
+
|
87 |
+
if __name__ == "__main__":
|
88 |
+
cli()
|
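A quick way to sanity-check the merged output is to count rows per event type (the path shown is an example only):

```python
import polars as pl

multi = pl.scan_parquet("./50m/multi_event.parquet")  # example path
print(multi.group_by("event_type").len().sort("len", descending=True).collect())
```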
benchmarks/scripts/transform2sequential.py
ADDED
@@ -0,0 +1,76 @@
1 |
+
import os
|
2 |
+
|
3 |
+
import click
|
4 |
+
import polars as pl
|
5 |
+
|
6 |
+
|
7 |
+
@click.command()
|
8 |
+
@click.option(
|
9 |
+
"--src_dir",
|
10 |
+
type=click.Path(exists=True, file_okay=False),
|
11 |
+
required=True,
|
12 |
+
help="Path to the directory containing source Parquet files, e.g., './50m'.",
|
13 |
+
)
|
14 |
+
@click.option(
|
15 |
+
"--dst_dir",
|
16 |
+
type=click.Path(file_okay=False),
|
17 |
+
required=False,
|
18 |
+
help="Path to the directory where Parquet files will be saved. "
|
19 |
+
"If not specified, Parquet files are saved in 'src_dir' with prefix 'seq_'.",
|
20 |
+
)
|
21 |
+
@click.option(
|
22 |
+
"--files",
|
23 |
+
type=str,
|
24 |
+
multiple=True,
|
25 |
+
help="List of parquet filenames to convert. If not specified, all Parquet files in 'src_dir' will be converted. "
|
26 |
+
"For example, '--files dislike.parquet --files like.parquet'.",
|
27 |
+
)
|
28 |
+
@click.option(
|
29 |
+
"--aggregation",
|
30 |
+
type=click.Choice(["structs", "columns"]),
|
31 |
+
required=True,
|
32 |
+
help="Agg method: 'structs' for a sequence of structs per 'uid', 'columns' for individual column aggregation.",
|
33 |
+
)
|
34 |
+
def cli(src_dir: str, dst_dir: str, files: list[str], aggregation: str):
|
35 |
+
    if dst_dir is None:
        # default to writing next to the source files (mirrors make_multievent.py)
        dst_dir = src_dir

    transform2sequential(src_dir, dst_dir, files, aggregation)
|
36 |
+
|
37 |
+
|
38 |
+
def transform2sequential(src_dir: str, dst_dir: str, files: list[str], aggregation: str):
|
39 |
+
for file in files:
|
40 |
+
print(f"Processing file: {file}")
|
41 |
+
|
42 |
+
src_path = os.path.join(src_dir, file)
|
43 |
+
|
44 |
+
parquet_path = os.path.join(dst_dir, file)
|
45 |
+
|
46 |
+
if os.path.exists(parquet_path):
|
47 |
+
parquet_path = os.path.join(dst_dir, f"{aggregation}_" + file)
|
48 |
+
assert not os.path.exists(parquet_path)
|
49 |
+
|
50 |
+
os.makedirs(dst_dir, exist_ok=True)
|
51 |
+
|
52 |
+
df = pl.scan_parquet(src_path)
|
53 |
+
|
54 |
+
if aggregation == "structs":
|
55 |
+
seq_df = (
|
56 |
+
df.select("uid", pl.struct(pl.all().exclude("uid")).alias("events"))
|
57 |
+
.group_by("uid", maintain_order=True)
|
58 |
+
.agg(pl.col("events"))
|
59 |
+
)
|
60 |
+
seq_df.sink_parquet(
|
61 |
+
parquet_path,
|
62 |
+
compression="lz4",
|
63 |
+
statistics=True,
|
64 |
+
)
|
65 |
+
|
66 |
+
elif aggregation == "columns":
|
67 |
+
col_agg_df = df.group_by("uid", maintain_order=True).agg(pl.all().exclude("uid"))
|
68 |
+
col_agg_df.sink_parquet(
|
69 |
+
parquet_path,
|
70 |
+
compression="lz4",
|
71 |
+
statistics=True,
|
72 |
+
)
|
73 |
+
|
74 |
+
|
75 |
+
if __name__ == "__main__":
|
76 |
+
cli()
|
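The two aggregation modes differ only in how the per-user events are packed; the toy example below (not part of the script) shows both on a three-row frame.

```python
import polars as pl

flat = pl.DataFrame({"uid": [1, 1, 2], "item_id": [10, 11, 12], "timestamp": [5, 6, 7]})

# 'columns': each non-uid column becomes its own list per user
print(flat.group_by("uid", maintain_order=True).agg(pl.all().exclude("uid")))

# 'structs': one list of {item_id, timestamp} structs per user
print(
    flat.select("uid", pl.struct(pl.all().exclude("uid")).alias("events"))
    .group_by("uid", maintain_order=True)
    .agg(pl.col("events"))
)
```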
benchmarks/tests/test_timesplit.py
ADDED
@@ -0,0 +1,50 @@
1 |
+
import numpy as np
|
2 |
+
import polars as pl
|
3 |
+
|
4 |
+
from yambda.processing.timesplit import flat_split_train_val_test, sequential_split_train_val_test
|
5 |
+
|
6 |
+
|
7 |
+
def create_dataframe(n: int = 1000) -> pl.DataFrame:
|
8 |
+
uids = np.random.randint(1, int(n * 0.05), size=n)
|
9 |
+
item_ids = np.random.randint(100, 200, size=n)
|
10 |
+
|
11 |
+
timestamps = np.random.randint(0, 100_000, size=n)
|
12 |
+
is_organic = np.random.choice([True, False], size=n)
|
13 |
+
|
14 |
+
df = pl.DataFrame(
|
15 |
+
{"uid": uids, "item_id": item_ids, "timestamp": timestamps, "is_organic": is_organic},
|
16 |
+
schema={"uid": pl.UInt32, "item_id": pl.UInt32, "timestamp": pl.UInt32, "is_organic": pl.UInt8},
|
17 |
+
)
|
18 |
+
|
19 |
+
df = df.sort(["uid", "timestamp"])
|
20 |
+
|
21 |
+
return df
|
22 |
+
|
23 |
+
|
24 |
+
def test_cross_check():
|
25 |
+
df = create_dataframe(10000)
|
26 |
+
|
27 |
+
q75_timestamp = int(df["timestamp"].quantile(0.75))
|
28 |
+
|
29 |
+
print(q75_timestamp)
|
30 |
+
|
31 |
+
flat_train, flat_val, flat_test = flat_split_train_val_test(
|
32 |
+
df.lazy(), test_timestamp=q75_timestamp, gap_size=1000, val_size=1000
|
33 |
+
)
|
34 |
+
|
35 |
+
assert flat_val is not None
|
36 |
+
|
37 |
+
df.group_by("uid", maintain_order=True).agg(pl.all().exclude("uid")).lazy()
|
38 |
+
|
39 |
+
seq_train, seq_val, seq_test = sequential_split_train_val_test(
|
40 |
+
df.group_by("uid", maintain_order=True).agg(pl.all().exclude("uid")).lazy(),
|
41 |
+
test_timestamp=q75_timestamp,
|
42 |
+
gap_size=1000,
|
43 |
+
val_size=1000,
|
44 |
+
)
|
45 |
+
|
46 |
+
assert seq_val is not None
|
47 |
+
|
48 |
+
assert seq_train.explode(pl.all().exclude("uid")).collect().equals(flat_train.collect())
|
49 |
+
assert seq_val.explode(pl.all().exclude("uid")).collect().equals(flat_val.collect())
|
50 |
+
assert seq_test.explode(pl.all().exclude("uid")).collect().equals(flat_test.collect())
|
benchmarks/yambda/__init__.py
ADDED
File without changes
|
benchmarks/yambda/constants.py
ADDED
@@ -0,0 +1,29 @@
1 |
+
class Constants:
|
2 |
+
HOUR_SECONDS = 60 * 60
|
3 |
+
DAY_SECONDS = 24 * HOUR_SECONDS
|
4 |
+
|
5 |
+
GAP_SIZE = HOUR_SECONDS // 2
|
6 |
+
VAL_SIZE = 1 * DAY_SECONDS
|
7 |
+
TEST_SIZE = 1 * DAY_SECONDS
|
8 |
+
|
9 |
+
LAST_TIMESTAMP = 26000000
|
10 |
+
TEST_TIMESTAMP = LAST_TIMESTAMP - TEST_SIZE
|
11 |
+
|
12 |
+
TRACK_LISTEN_THRESHOLD = 50
|
13 |
+
|
14 |
+
NUM_RANKED_ITEMS = 100
|
15 |
+
|
16 |
+
METRICS = [
|
17 |
+
"ndcg@10",
|
18 |
+
"ndcg@50",
|
19 |
+
"ndcg@100",
|
20 |
+
"dcg@10",
|
21 |
+
"dcg@50",
|
22 |
+
"dcg@100",
|
23 |
+
"recall@10",
|
24 |
+
"recall@50",
|
25 |
+
"recall@100",
|
26 |
+
"coverage@10",
|
27 |
+
"coverage@50",
|
28 |
+
"coverage@100",
|
29 |
+
]
|
benchmarks/yambda/evaluation/__init__.py
ADDED
File without changes
|
benchmarks/yambda/evaluation/metrics.py
ADDED
@@ -0,0 +1,204 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from abc import ABC, abstractmethod
|
4 |
+
from collections import defaultdict
|
5 |
+
from typing import Any, Iterable
|
6 |
+
|
7 |
+
import torch
|
8 |
+
from tqdm import tqdm
|
9 |
+
|
10 |
+
from .ranking import Ranked, Targets
|
11 |
+
|
12 |
+
|
13 |
+
def cut_off_ranked(ranked: Ranked, targets: Targets) -> Ranked:
|
14 |
+
mask = torch.isin(ranked.user_ids, targets.user_ids)
|
15 |
+
|
16 |
+
assert ranked.scores is not None
|
17 |
+
|
18 |
+
ranked = Ranked(
|
19 |
+
user_ids=ranked.user_ids[mask],
|
20 |
+
scores=ranked.scores[mask, :],
|
21 |
+
item_ids=ranked.item_ids[mask, :],
|
22 |
+
num_item_ids=ranked.num_item_ids,
|
23 |
+
)
|
24 |
+
|
25 |
+
assert ranked.item_ids.shape[0] == len(targets), "Ranked doesn't contain all targets.user_ids"
|
26 |
+
|
27 |
+
return ranked
|
28 |
+
|
29 |
+
|
30 |
+
class Metric(ABC):
|
31 |
+
@abstractmethod
|
32 |
+
def __call__(
|
33 |
+
        self, ranked: Ranked | None, targets: Targets | None, target_mask: torch.Tensor | None, ks: Iterable[int]
    ) -> dict[int, float]:
        pass


class Recall(Metric):
    def __call__(
        self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
    ) -> dict[int, float]:
        assert all(0 < k <= target_mask.shape[1] for k in ks)

        values = {}

        for k in ks:
            num_positives = targets.lengths.to(torch.float32)
            num_positives[num_positives == 0] = torch.inf

            values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / num_positives
            values[k] = torch.mean(values[k]).item()

        return values


class Precision(Metric):
    def __call__(
        self, ranked: Ranked | None, targets: Targets | None, target_mask: torch.Tensor, ks: Iterable[int]
    ) -> dict[int, float]:
        assert all(0 < k <= target_mask.shape[1] for k in ks)

        values = {}

        for k in ks:
            values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / k
            values[k] = torch.mean(values[k]).item()

        return values


class Coverage(Metric):
    def __init__(self, cut_off_ranked: bool = False):
        self.cut_off_ranked = cut_off_ranked

    def __call__(
        self, ranked: Ranked, targets: Targets | None, target_mask: torch.Tensor | None, ks: Iterable[int]
    ) -> dict[int, float]:
        if self.cut_off_ranked:
            assert targets is not None
            ranked = cut_off_ranked(ranked, targets)

        assert all(0 < k <= ranked.item_ids.shape[1] for k in ks)
        assert ranked.num_item_ids is not None

        values = {}
        for k in ks:
            values[k] = ranked.item_ids[:, :k].flatten().unique().shape[0] / ranked.num_item_ids

        return values


class MRR(Metric):
    def __call__(
        self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
    ) -> dict[int, float]:
        assert all(0 < k <= target_mask.shape[1] for k in ks)

        values = {}
        for k in ks:
            num_positives = targets.lengths.to(torch.float32)

            indexes = torch.argmax(target_mask[:, :k].to(torch.float32), dim=-1).to(torch.float32) + 1
            indexes[num_positives == 0] = torch.inf

            rr = 1 / indexes

            values[k] = torch.mean(rr).item()

        return values


class DCG(Metric):
    def __call__(
        self, ranked: Ranked | None, targets: Targets | None, target_mask: torch.Tensor, ks: Iterable[int]
    ) -> dict[int, float]:
        assert all(0 < k <= target_mask.shape[1] for k in ks)

        values = {}

        discounts = 1.0 / torch.log2(
            torch.arange(2, target_mask.shape[1] + 2, device=target_mask.device, dtype=torch.float32)
        )

        for k in ks:
            dcg_k = torch.sum(target_mask[:, :k] * discounts[:k], dim=1)
            values[k] = torch.mean(dcg_k).item()

        return values


class NDCG(Metric):
    def __call__(
        self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
    ) -> dict[int, float]:
        actual_dcg = DCG()(ranked, targets, target_mask, ks)

        ideal_target_mask = (
            torch.arange(target_mask.shape[1], device=targets.device)[None, :] < targets.lengths[:, None]
        ).to(torch.float32)
        assert target_mask.shape == ideal_target_mask.shape

        ideal_dcg = DCG()(ranked, targets, ideal_target_mask, ks)

        ndcg_values = {k: (actual_dcg[k] / ideal_dcg[k] if ideal_dcg[k] != 0 else 0.0) for k in ks}

        return ndcg_values


REGISTERED_METRIC_FN = {
    "recall": Recall(),
    "precision": Precision(),
    "mrr": MRR(),
    "dcg": DCG(),
    "ndcg": NDCG(),
    "coverage": Coverage(cut_off_ranked=False),
}


def _parse_metrics(metric_names: list[str]) -> dict[str, list[int]]:
    parsed_metrics = []

    for metric in metric_names:
        parts = metric.split('@')
        name = parts[0]

        assert len(parts) > 1, f"Invalid metric: {metric}, specify @k"

        value = int(parts[1])
        parsed_metrics.append((name, value))

    metrics = defaultdict(list)
    for m in parsed_metrics:
        metrics[m[0]].append(m[1])

    return metrics


def create_target_mask(ranked: Ranked, targets: Targets) -> torch.Tensor:
    ranked = cut_off_ranked(ranked, targets)

    assert ranked.device == targets.device
    assert ranked.item_ids.shape[0] == len(targets)

    target_mask = ranked.item_ids.new_zeros(ranked.item_ids.shape, dtype=torch.float32)

    for i, target in enumerate(tqdm(targets.item_ids, desc="Making target mask")):
        target_mask[i, torch.isin(ranked.item_ids[i], target)] = 1.0

    return target_mask


def calc_metrics(ranked: Ranked, targets: Targets, metrics: list[str]) -> dict[str, Any]:
    grouped_metrics = _parse_metrics(metrics)

    result = {}

    target_mask = create_target_mask(ranked, targets)

    for name, ks in grouped_metrics.items():
        result[name] = REGISTERED_METRIC_FN[name](ranked, targets, target_mask, ks=ks)

    return result
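For orientation, here is a minimal usage sketch of the metric callables (not part of the commit), assuming the `benchmarks/yambda` package is importable as `yambda`. The hit mask and targets are toy values; `Targets` is defined in `ranking.py` below.

```python
# Toy illustration of Recall/NDCG from yambda.evaluation.metrics (values are made up).
import torch

from yambda.evaluation.metrics import NDCG, Recall
from yambda.evaluation.ranking import Targets

# Binary hit mask: rows are users, columns are ranked positions (1.0 = relevant item at that rank).
target_mask = torch.tensor([[0.0, 1.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0]])

# Ground truth; Recall/NDCG only use targets.lengths (number of positives per user).
targets = Targets(
    user_ids=torch.tensor([0, 1]),
    item_ids=[torch.tensor([11, 13]), torch.tensor([42])],
)

print(Recall()(None, targets, target_mask, ks=[2, 4]))  # {2: 0.25, 4: 0.5}
print(NDCG()(None, targets, target_mask, ks=[4]))       # DCG@4 normalized by the ideal DCG@4
```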
benchmarks/yambda/evaluation/ranking.py
ADDED
@@ -0,0 +1,145 @@
import dataclasses
from functools import cached_property

import numpy as np
import polars as pl
import torch
from tqdm import tqdm


@dataclasses.dataclass
class Embeddings:
    ids: torch.Tensor
    embeddings: torch.Tensor

    def __post_init__(self):
        assert self.ids.dim() == 1
        assert self.embeddings.dim() == 2
        assert self.ids.shape[0] == self.embeddings.shape[0]

        assert self.ids.device == self.embeddings.device

        if not torch.all(self.ids[:-1] <= self.ids[1:]):
            indexes = torch.argsort(self.ids, descending=False)
            self.embeddings = self.embeddings[indexes, :]
            self.ids = self.ids[indexes]

        assert torch.all(self.ids[:-1] < self.ids[1:]), "ids should be unique"

    @property
    def device(self):
        return self.ids.device

    def save(self, file_path: str):
        ids_np = self.ids.cpu().numpy()
        embeddings_np = self.embeddings.cpu().numpy()
        np.savez(file_path, ids=ids_np, embeddings=embeddings_np)

    @classmethod
    def load(cls, file_path: str, device: torch.device = torch.device('cpu')) -> 'Embeddings':
        with np.load(file_path) as data:
            ids_np = data['ids']
            embeddings_np = data['embeddings']

        ids = torch.from_numpy(ids_np).to(device)
        embeddings = torch.from_numpy(embeddings_np).to(device)

        return cls(ids=ids, embeddings=embeddings)


@dataclasses.dataclass
class Targets:
    user_ids: torch.Tensor
    item_ids: list[torch.Tensor]

    def __post_init__(self):
        assert len(self.item_ids) > 0
        assert self.user_ids.dim() == 1
        assert self.user_ids.shape[0] == len(self.item_ids)
        assert all(x.dim() == 1 for x in self.item_ids), "all ids should be 1D"

        assert all(x.device == self.item_ids[0].device for x in self.item_ids), "all ids should be on the same device"
        assert self.user_ids.device == self.item_ids[0].device

        if not torch.all(self.user_ids[:-1] <= self.user_ids[1:]):
            indexes = torch.argsort(self.user_ids, descending=False)
            self.item_ids = [self.item_ids[i] for i in indexes]
            self.user_ids = self.user_ids[indexes]

        assert torch.all(self.user_ids[:-1] < self.user_ids[1:]), "user_ids should be unique"

    @cached_property
    def lengths(self):
        return torch.tensor([ids.shape[0] for ids in self.item_ids], device=self.item_ids[0].device)

    def __len__(self):
        return len(self.item_ids)

    @property
    def device(self):
        return self.user_ids.device

    @classmethod
    def from_sequential(cls, df: pl.LazyFrame | pl.DataFrame, device: torch.device | str) -> 'Targets':
        df = df.lazy()
        return cls(
            df.select("uid").collect()["uid"].to_torch().to(device),
            [torch.tensor(x, device=device) for x in df.select("item_id").collect()["item_id"].to_list()],
        )


@dataclasses.dataclass
class Ranked:
    user_ids: torch.Tensor
    item_ids: torch.Tensor
    scores: torch.Tensor | None = None

    num_item_ids: int | None = None  # total number of items; useful e.g. for coverage

    def __post_init__(self):
        if self.scores is None:
            self.scores = torch.arange(
                self.item_ids.shape[1], 0, -1, device=self.item_ids.device, dtype=torch.float32
            ).expand((self.user_ids.shape[0], self.item_ids.shape[1]))

        assert self.user_ids.dim() == 1
        assert self.scores.dim() == 2
        assert self.scores.shape == self.item_ids.shape
        assert self.user_ids.shape[0] == self.scores.shape[0]

        assert self.user_ids.device == self.scores.device == self.item_ids.device

        assert torch.all(self.scores[:, :-1] >= self.scores[:, 1:]), "scores should be sorted"

        if not torch.all(self.user_ids[:-1] <= self.user_ids[1:]):
            indexes = torch.argsort(self.user_ids, descending=False)
            self.item_ids = self.item_ids[indexes, :]
            self.scores = self.scores[indexes, :]
            self.user_ids = self.user_ids[indexes]  # keep user_ids aligned with the permuted rows

    @property
    def device(self):
        return self.user_ids.device


def rank_items(users: Embeddings, items: Embeddings, num_items: int, batch_size: int = 128) -> Ranked:
    assert users.device == items.device

    num_users = users.ids.shape[0]

    scores = users.embeddings.new_empty((num_users, num_items))
    item_ids = users.embeddings.new_empty((num_users, num_items), dtype=torch.long)

    for batch_idx in tqdm(range((num_users + batch_size - 1) // batch_size), desc="Calc topk by batches"):
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx + 1) * batch_size

        batch_scores = users.embeddings[start_idx:end_idx, :] @ items.embeddings.T

        sort_indices = batch_scores.topk(num_items, dim=-1).indices
        scores[start_idx:end_idx, :] = torch.gather(batch_scores, dim=-1, index=sort_indices)

        item_ids[start_idx:end_idx, :] = torch.gather(
            items.ids.expand(sort_indices.shape[0], items.ids.shape[0]), dim=-1, index=sort_indices
        )

    return Ranked(user_ids=users.ids, item_ids=item_ids, scores=scores, num_item_ids=items.ids.shape[0])
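A short usage sketch for `rank_items` (illustrative, not from the commit): random embeddings stand in for a trained model's user/item factors.

```python
# Hypothetical example: rank a small random catalogue for a few users.
import torch

from yambda.evaluation.ranking import Embeddings, rank_items

torch.manual_seed(0)

users = Embeddings(ids=torch.arange(4), embeddings=torch.randn(4, 16))
items = Embeddings(ids=torch.arange(100), embeddings=torch.randn(100, 16))

# Top-10 items per user by dot-product score, computed in batches of 2 users.
ranked = rank_items(users, items, num_items=10, batch_size=2)

print(ranked.item_ids.shape)  # torch.Size([4, 10])
print(ranked.scores[:, :3])   # per-user scores, sorted in descending order
```

The resulting `Ranked` object, together with a `Targets` instance, is what `calc_metrics` in `metrics.py` consumes.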
benchmarks/yambda/processing/__init__.py
ADDED
File without changes
benchmarks/yambda/processing/chunk_read.py
ADDED
@@ -0,0 +1,79 @@
import math
from typing import Any, Generator

import polars as pl
import torch


# https://github.com/pola-rs/polars/issues/10683
def iter_slices(df: pl.LazyFrame, batch_size: int) -> Generator[pl.DataFrame, None, None]:
    assert isinstance(df, pl.LazyFrame), "df must be a LazyFrame"

    def get_batch(df: pl.LazyFrame, offset: int, batch_size: int) -> pl.DataFrame:
        batch = df.slice(offset, batch_size)
        batch = batch.collect()
        return batch

    batch = get_batch(df, 0, batch_size)

    yield batch

    offset = len(batch)
    if offset:
        while True:
            batch = get_batch(df, offset, batch_size)
            len_ = len(batch)
            if len_:
                offset += len_
                yield batch
            else:
                break


def iter_rows(df: pl.LazyFrame, batch_size: int) -> Generator[dict[str, Any], None, None]:
    for batch in iter_slices(df, batch_size):
        for row in batch.iter_rows(named=True):
            yield row


class YambdaIterableDataset(torch.utils.data.IterableDataset):
    def __init__(self, df: pl.LazyFrame | pl.DataFrame):
        super().__init__()

        self.start = 0
        self.end = df.lazy().select(pl.len()).collect().item()

        self.df = df.lazy()

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()

        df = self.df

        start = self.start
        end = self.end

        if worker_info is not None:
            per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            start = self.start + worker_id * per_worker
            end = min(start + per_worker, self.end)

        df_slice = df.slice(start, end - start)

        for row in iter_rows(df_slice, 2048):
            yield row


def get_default_dataloader(dataset: YambdaIterableDataset, collator, batch_size: int):
    return torch.utils.data.DataLoader(
        dataset,
        multiprocessing_context="spawn",
        num_workers=3,
        prefetch_factor=10,
        batch_size=batch_size,
        collate_fn=collator,
        pin_memory=True,
        pin_memory_device="cuda",
        persistent_workers=True,
    )
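A minimal sketch of the chunked readers (assumed usage, single process, no GPU): `get_default_dataloader` additionally configures spawned workers, prefetching, and CUDA-pinned memory for training.

```python
# Hypothetical example: stream a Polars LazyFrame in slices and through a DataLoader.
import polars as pl
import torch

from yambda.processing.chunk_read import YambdaIterableDataset, iter_slices

df = pl.DataFrame({"uid": [1, 1, 2, 3], "item_id": [10, 11, 12, 13]}).lazy()

# Collect the frame slice by slice instead of materializing it at once.
for batch in iter_slices(df, batch_size=2):
    print(batch.shape)  # (2, 2) twice

# Row-wise iteration for PyTorch; each element is a dict of column values.
loader = torch.utils.data.DataLoader(YambdaIterableDataset(df), batch_size=2, num_workers=0)
print(next(iter(loader)))  # e.g. {'uid': tensor([1, 1]), 'item_id': tensor([10, 11])}
```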
benchmarks/yambda/processing/timesplit.py
ADDED
@@ -0,0 +1,214 @@
import polars as pl

from ..constants import Constants


def flat_split_train_val_test(
    df: pl.LazyFrame,
    test_timestamp: int,
    val_size: int = 0,
    gap_size: int = Constants.GAP_SIZE,
    drop_non_train_items: bool = False,
    engine: str = "streaming",
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    """
    Splits a flat interaction log into training, validation, and test segments by timestamp.

    The segments are defined as follows:
    - Training set: [0, test_timestamp - gap_size - val_size - gap_size) if val_size != 0,
      otherwise [0, test_timestamp - gap_size)
    - Validation set: [test_timestamp - val_size - gap_size, test_timestamp - gap_size), if val_size != 0
    - Test set: [test_timestamp, +inf)

    Only users seen in the training set are kept in the validation and test sets;
    items unseen in training are additionally dropped when `drop_non_train_items=True`.

    Parameters
    ----------
    df : pl.LazyFrame
        The dataset in Polars' LazyFrame format.
    test_timestamp : int
        The timestamp marking the start of the test set.
    val_size : int
        The duration of the validation window. If 0, no validation set is created.
    gap_size : int
        The duration of the gap between the training and validation/test sets.
    drop_non_train_items : bool
        Whether to drop items that are not in the training set.
    engine : str
        Polars engine used to collect the unique training users/items.

    Returns
    -------
    tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]
        LazyFrames for the training, validation (if applicable), and test sets.
    """

    def drop(df: pl.LazyFrame, unique_train_item_ids) -> pl.LazyFrame:
        if not drop_non_train_items:
            return df

        return (
            df.with_columns(
                pl.col("item_id")
                .is_in(unique_train_item_ids.get_column("item_id").implode())
                .alias("item_id_in_train")
            )
            .filter("item_id_in_train")
            .drop("item_id_in_train")
        )

    train_timestamp = test_timestamp - gap_size - val_size - (gap_size if val_size != 0 else 0)

    assert gap_size >= 0
    assert val_size >= 0
    assert train_timestamp > 0

    df_lazy = df.lazy()

    train = df_lazy.filter(pl.col("timestamp") < train_timestamp)

    unique_train_uids = train.select("uid").unique().collect(engine=engine)
    unique_train_item_ids = train.select("item_id").unique().collect(engine=engine)

    validation = None
    if val_size != 0:
        validation = (
            df_lazy.filter(
                (pl.col("timestamp") >= test_timestamp - val_size - gap_size)
                & (pl.col("timestamp") < test_timestamp - gap_size)
            )
            .with_columns(
                pl.col("uid").is_in(unique_train_uids.get_column("uid").implode()).alias("uid_in_train")
            )  # to prevent filter reordering
            .filter("uid_in_train")
            .drop("uid_in_train")
        )

        validation = drop(validation, unique_train_item_ids)

    test = (
        df_lazy.filter(pl.col("timestamp") >= test_timestamp)
        .with_columns(
            pl.col("uid").is_in(unique_train_uids.get_column("uid").implode()).alias("uid_in_train")
        )  # to prevent filter reordering
        .filter("uid_in_train")
        .drop("uid_in_train")
    )

    test = drop(test, unique_train_item_ids)

    return train, validation, test


def sequential_split_train_val_test(
    df: pl.LazyFrame,
    test_timestamp: int,
    val_size: int = 0,
    gap_size: int = Constants.GAP_SIZE,
    drop_non_train_items: bool = False,
    engine: str = "streaming",
) -> tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]:
    """
    Same split as `flat_split_train_val_test`, but for the sequential format, where each row
    aggregates one user's history into list columns (e.g. `item_id`, `timestamp`).

    The segments are defined as follows:
    - Training set: [0, test_timestamp - gap_size - val_size - gap_size) if val_size != 0,
      otherwise [0, test_timestamp - gap_size)
    - Validation set: [test_timestamp - val_size - gap_size, test_timestamp - gap_size), if val_size != 0
    - Test set: [test_timestamp, +inf)

    Only users seen in the training set are kept in the validation and test sets;
    items unseen in training are additionally dropped when `drop_non_train_items=True`.
    Users whose lists become empty after filtering are removed.

    Returns
    -------
    tuple[pl.LazyFrame, pl.LazyFrame | None, pl.LazyFrame]
        LazyFrames for the training, validation (if applicable), and test sets.
    """

    def drop(df: pl.LazyFrame, unique_train_item_ids) -> pl.LazyFrame:
        if not drop_non_train_items:
            return df

        return df.select(
            "uid",
            pl.all()
            .exclude("uid")
            .list.gather(
                pl.col("item_id").list.eval(
                    pl.arg_where(pl.element().is_in(unique_train_item_ids.get_column("item_id").implode()))
                )
            ),
        ).filter(pl.col("item_id").list.len() > 0)

    train_timestamp = test_timestamp - gap_size - val_size - (gap_size if val_size != 0 else 0)

    assert gap_size >= 0
    assert val_size >= 0
    assert train_timestamp > 0

    df_lazy = df.lazy()

    train = df_lazy.select(
        "uid",
        pl.all()
        .exclude("uid")
        .list.gather(pl.col("timestamp").list.eval(pl.arg_where(pl.element() < train_timestamp))),
    ).filter(pl.col("item_id").list.len() > 0)

    unique_train_uids = train.select("uid").unique().collect(engine=engine)
    unique_train_item_ids = train.explode("item_id").select("item_id").unique().collect(engine=engine)

    validation = None
    if val_size != 0:
        validation = (
            df_lazy.select(
                "uid",
                pl.all()
                .exclude("uid")
                .list.gather(
                    pl.col("timestamp").list.eval(
                        pl.arg_where(
                            (pl.element() >= test_timestamp - val_size - gap_size)
                            & (pl.element() < test_timestamp - gap_size)
                        )
                    )
                ),
            )
            .with_columns(
                pl.col("uid").is_in(unique_train_uids.get_column("uid").implode()).alias("uid_in_train")
            )  # to prevent filter reordering
            .filter("uid_in_train")
            .drop("uid_in_train")
        )

        validation = drop(validation, unique_train_item_ids).filter(pl.col("item_id").list.len() > 0)

    test = (
        df_lazy.select(
            "uid",
            pl.all()
            .exclude("uid")
            .list.gather(pl.col("timestamp").list.eval(pl.arg_where(pl.element() >= test_timestamp))),
        )
        .with_columns(
            pl.col("uid").is_in(unique_train_uids.get_column("uid").implode()).alias("uid_in_train")
        )  # to prevent filter reordering
        .filter("uid_in_train")
        .drop("uid_in_train")
    )

    test = drop(test, unique_train_item_ids).filter(pl.col("item_id").list.len() > 0)

    return train, validation, test
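A toy sketch of the flat split (not part of the commit; the column names match the dataset schema used above, and the timestamps and window sizes are made up):

```python
# Hypothetical example of flat_split_train_val_test on a tiny interaction log.
import polars as pl

from yambda.processing.timesplit import flat_split_train_val_test

df = pl.DataFrame(
    {
        "uid": [1, 1, 1, 2, 2, 3],
        "item_id": [10, 11, 12, 10, 13, 14],
        "timestamp": [100, 500, 950, 200, 960, 955],
    }
).lazy()

# Test window starts at t=900, with a 50-unit gap before it and no validation split.
train, val, test = flat_split_train_val_test(df, test_timestamp=900, val_size=0, gap_size=50)

print(train.collect())  # interactions with timestamp < 850
print(val)              # None, since val_size == 0
print(test.collect())   # interactions with timestamp >= 900, restricted to users seen in train
```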
benchmarks/yambda/utils.py
ADDED
@@ -0,0 +1,42 @@
import functools
from typing import Any


def sum_dicts(d1: dict[Any, Any], d2: dict[Any, Any]) -> dict[Any, Any]:
    if len(d1) == 0:
        return d2
    if len(d2) == 0:
        return d1

    if d1.keys() != d2.keys():
        raise ValueError("Keys do not match.")

    result = {}
    for key in d1:
        assert isinstance(d1, type(d2))

        if isinstance(d1[key], dict) and isinstance(d2[key], dict):
            result[key] = sum_dicts(d1[key], d2[key])
        else:
            result[key] = d1[key] + d2[key]

    return result


def divide_dict(d: dict[Any, Any], denom: float) -> dict[Any, Any]:
    result = {}
    for key in d:
        if isinstance(d[key], dict):
            result[key] = divide_dict(d[key], denom)
        else:
            result[key] = d[key] / denom

    return result


def mean_dicts(arr: list[dict[Any, Any]]) -> dict[Any, Any]:
    return divide_dict(functools.reduce(sum_dicts, arr), len(arr))


def argmax(a: list[Any], key=lambda x: x) -> Any:
    return max(range(len(a)), key=lambda x: key(a[x]))
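Finally, a small sketch (illustrative, with made-up numbers) of how the dict helpers can average nested metric dictionaries, e.g. results computed over several evaluation chunks:

```python
# Hypothetical example of mean_dicts/argmax from yambda.utils.
from yambda.utils import argmax, mean_dicts

chunk_results = [
    {"ndcg": {10: 0.30, 100: 0.45}, "recall": {10: 0.20, 100: 0.50}},
    {"ndcg": {10: 0.40, 100: 0.55}, "recall": {10: 0.30, 100: 0.60}},
]

# Element-wise average over the list of nested dicts.
print(mean_dicts(chunk_results))
# ~ {'ndcg': {10: 0.35, 100: 0.5}, 'recall': {10: 0.25, 100: 0.55}} (up to float rounding)

# argmax returns the index of the best element under `key`, e.g. the best epoch by NDCG.
per_epoch_ndcg = [0.41, 0.47, 0.44]
print(argmax(per_epoch_ndcg))  # 1
```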