Adding SHAP on top of Tiny Time Mixers

#20
by fferrador - opened

Hello to you all,

I'm working on my master's thesis on "Zero-shot forecasting and Explainable AI", and I would like to explain the predictions of the TTM zero-shot model using SHAP. However, my background is in management, so I have limited knowledge of Python.

How can I "add" SHAP explanations to my model? The code I'm using is the one provided by the authors (below).

Thank you for your help

# Clone IBM's tsfm repo

! git clone --depth 1 --branch v0.2.9 https://github.com/IBM/tsfm.git

# Change directory: move inside the tsfm repo

%cd tsfm

# Install the tsfm library and scikit-learn

! pip install ".[notebooks]" scikit-learn
%cd ../

import os
import math
import tempfile
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import OneCycleLR
from transformers import EarlyStoppingCallback, Trainer, TrainingArguments, set_seed
import numpy as np
import pandas as pd

# TSFM libraries

from tsfm_public.models.tinytimemixer import TinyTimeMixerForPrediction
from tsfm_public.toolkit.callbacks import TrackingCallback

pollution_data = pd.read_csv(r"C:\Users\fferrado\Documents\0 - PESSOAL\6 - Zero-Shot\pollution.csv")

timestamp_column = "date"
target_columns = ["pm2.5"]
observable_columns = ["DEWP","TEMP","PRES", "Iws"]

pollution_data['date'] = pd.to_datetime(pollution_data[['year','month','day', 'hour']])

# Deal with missing values (NA) by interpolating the target
pollution_data['pm2.5'] = pollution_data['pm2.5'].interpolate()

# Set seed for reproducibility

SEED = 42
set_seed(SEED)

# Results directory

OUT_DIR = "ttm_finetuned_models/"

# Forecasting parameters

context_length = 512 # TTM can use 512 time points into the past
forecast_length = 96 # TTM can predict 96 time points into the future

from tsfm_public import (
    TimeSeriesPreprocessor,
    TinyTimeMixerForPrediction,
    TrackingCallback,
    count_parameters,
    get_datasets,
)

data_length = len(pollution_data)

train_start_index = 0
train_end_index = round(data_length * 0.8)

# We shift the start of the evaluation period back by context_length so that
# the first evaluation timestamp immediately follows the training data.

eval_start_index = round(data_length * 0.8) - context_length
eval_end_index = round(data_length * 0.9)

test_start_index = round(data_length * 0.9)
test_end_index = data_length

split_config = {
    "train": [train_start_index, train_end_index],
    "valid": [eval_start_index, eval_end_index],
    "test": [test_start_index, test_end_index],
}

column_specifiers = {
    "timestamp_column": timestamp_column,
    "target_columns": target_columns,
    "observable_columns": observable_columns,
}

tsp = TimeSeriesPreprocessor(
    **column_specifiers,
    context_length=context_length,
    prediction_length=forecast_length,
    scaling=True,
    encode_categorical=True,
    scaler_type="standard",  # "minmax" or "standard"
)

# get_datasets() returns torch datasets for training and evaluation;
# the forecasting pipeline further below takes a pandas DataFrame instead

train_dataset, valid_dataset, test_dataset = get_datasets(
    tsp, pollution_data, split_config
)

# Load the pretrained TTM; prediction_filter_length=24 keeps only the first 24 forecast steps
zeroshot_model = TinyTimeMixerForPrediction.from_pretrained("ibm/TTM", revision="main", prediction_filter_length=24)

# Create a Trainer for zero-shot evaluation

zeroshot_trainer = Trainer(
    model=zeroshot_model,
)

zeroshot_trainer.evaluate(test_dataset) #note that this is the Torch dataset created by get_datasets(), not a Pandas DataFrame

from tsfm_public.toolkit.util import select_by_index
from tsfm_public.toolkit.time_series_forecasting_pipeline import TimeSeriesForecastingPipeline
from tsfm_public.toolkit.visualization import plot_ts_forecasting

zs_forecast_pipeline = TimeSeriesForecastingPipeline(
    model=zeroshot_model,
    device="cpu",  # we are using the CPU here, but you can use a GPU if you have one
    timestamp_column=timestamp_column,
    id_columns=[],
    target_columns=target_columns,
    observable_columns=observable_columns,
    freq="1h",
)

zs_forecast = zs_forecast_pipeline(tsp.preprocess(pollution_data[test_start_index:test_end_index]))

def compare_forecast(forecast, date_col, prediction_col, actual_col, hours_out):
    comparisons = pd.DataFrame()
    comparisons[date_col] = forecast[date_col]
    actual = []
    pred = []

    for i in range(len(forecast)):
        # predicted and observed value `hours_out` steps after each context window
        pred.append(forecast[prediction_col].values[i][hours_out - 1])
        actual.append(forecast[actual_col].values[i][hours_out - 1])

    comparisons['actual'] = actual
    comparisons['pred'] = pred

    return comparisons
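
For example, to line up the 24-hours-ahead predictions with the actuals (the column names `pm2.5_prediction` and `pm2.5` are an assumption about how the pipeline names its outputs; check `zs_forecast.columns` first):

comparison_24h = compare_forecast(zs_forecast, "date", "pm2.5_prediction", "pm2.5", 24)
print(comparison_24h.head())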

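As for wiring in SHAP itself: one possible way to bolt it onto the zero-shot model is the model-agnostic shap.KernelExplainer over flattened context windows. The code below is a minimal sketch, not the TTM authors' recipe: it assumes `pip install shap`, that each item of the torch test dataset exposes a `past_values` tensor of shape (context_length, n_channels), that the model output exposes `.prediction_outputs`, and that pm2.5 is channel 0 (check the preprocessor's channel ordering).

import shap

# Total number of input channels fed to the model (target + observables)
n_channels = len(target_columns) + len(observable_columns)

def predict_first_step(flat_windows):
    # flat_windows: numpy array of shape (n_samples, context_length * n_channels)
    x = torch.tensor(flat_windows, dtype=torch.float32).reshape(-1, context_length, n_channels)
    with torch.no_grad():
        out = zeroshot_model(past_values=x)
    # First forecast step for the target channel (pm2.5 assumed at index 0)
    return out.prediction_outputs[:, 0, 0].numpy()

# Background windows and a handful of windows to explain, flattened from the torch test set
background = np.stack([test_dataset[i]["past_values"].numpy().reshape(-1) for i in range(20)])
to_explain = np.stack([test_dataset[i]["past_values"].numpy().reshape(-1) for i in range(20, 25)])

explainer = shap.KernelExplainer(predict_first_step, background)
# 512 time steps x 5 channels = 2560 input features, so this is slow; raise
# nsamples for stabler attributions, or group features to speed things up
shap_values = explainer.shap_values(to_explain, nsamples=500)
# shap_values[j] attributes window j's prediction over all (time step, channel)
# inputs; reshape it to (context_length, n_channels) to see which lags and
# variables mattered most

KernelExplainer treats the model as a black box, which matches the zero-shot setting; gradient-based alternatives such as shap.GradientExplainer are faster but tie the explanation to the model internals.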