{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "collapsed": true, "id": "PmRSSg__E-qm", "outputId": "2aecdb88-1734-46de-b579-9b169e5163b7" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/103.9 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m103.9/103.9 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m160.4/160.4 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m95.8/95.8 kB\u001b[0m \u001b[31m10.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m57.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Building wheel for liac-arff (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m471.4/471.4 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h" ] } ], "source": [ "!pip install -q tabicl\n", "!pip install -q openml\n", "!pip install -q kaggle\n", "!pip install -q skrub -U" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "xTQPIegPezQC", "outputId": "3fe90c6b-b82a-468a-a335-587286a93696" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/MyDrive\n" ] } ], "source": [ "from google.colab import drive\n", "drive.mount('/content/MyDrive')" ] }, { "cell_type": "code", "source": [ "from typing import Optional\n", "import os, json\n", "import numpy as np\n", "import pandas as pd\n", "import torch\n", "from skrub import TableVectorizer\n", "from tabicl import TabICLClassifier\n", "from sklearn.impute import SimpleImputer\n", "from sklearn.pipeline import make_pipeline\n", "from sklearn.preprocessing import OrdinalEncoder\n", "from sklearn.metrics import accuracy_score, roc_auc_score" ], "metadata": { "id": "_Ou6aK8ZkReU" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "### Custom Softmax" ], "metadata": { "id": "NoR1dLt_kcTM" } }, { "cell_type": "code", "source": [ "# raw_logits = None\n", "# @staticmethod\n", "# def hook_softmax(x, axis: int = -1, temperature: float = 0.9):\n", "# \"\"\"Compute softmax values with temperature scaling using NumPy.\n", "\n", "# Parameters\n", "# ----------\n", "# x : ndarray\n", "# Input array of logits.\n", "\n", "# axis : int, default=-1\n", "# Axis along which to compute softmax.\n", "\n", "# temperature : float, default=0.9\n", "# Temperature scaling parameter.\n", "\n", "# Returns\n", "# -------\n", "# ndarray\n", "# Softmax probabilities along the specified axis, with the same shape as the input.\n", "# \"\"\"\n", "# 
global raw_logits\n", "# raw_logits = np.copy(x) # save raw logits\n", "# x = x / temperature\n", "# # Subtract max for numerical stability\n", "# x_max = np.max(x, axis=axis, keepdims=True)\n", "# e_x = np.exp(x - x_max)\n", "# # Compute softmax\n", "# return e_x / np.sum(e_x, axis=axis, keepdims=True)\n", "\n", "# # Replace original softmax with hooked one\n", "# TabICLClassifier.softmax = hook_softmax" ], "metadata": { "id": "MnL-8godkV5G" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Conformal Prediction" ], "metadata": { "id": "wgxFkG3Fj9kD" } }, { "cell_type": "code", "source": [ "import numpy as np\n", "from numpy.typing import NDArray\n", "\n", "def confidence_score(probs: NDArray):\n", " return -np.max(probs, axis=1)\n", "\n", "def margin_score(probs: NDArray):\n", " sorted_probs = np.sort(probs, axis=1)\n", " return sorted_probs[:, -2] - sorted_probs[:, -1]\n", "\n", "def entropy_score(probs: NDArray, eps = 1e-9):\n", " return -np.sum(probs * np.log(probs + eps), axis=1)\n", "\n", "def nnl_score(probs: NDArray, true_labels: NDArray, eps = 1e-9):\n", " return -np.log(probs[np.arange(probs.shape[0]), true_labels] + eps)\n", "\n", "def ri_score(probs: NDArray, eps = 1e-9):\n", " return -np.sum(np.log(probs + eps), axis=1)\n", "\n", "\n", "def lac_conformal_score(probs: NDArray, true_labels: NDArray):\n", " \"\"\"\n", " Compute the LAC conformal score for a batch of softmax score vectors and true labels.\n", "\n", " Parameters:\n", " - probs: 2D numpy array of shape (n_samples, num_classes), softmax probs for each sample\n", " - true_labels: 1D numpy array of shape (n_samples,), true class labels for each sample\n", "\n", " Returns:\n", " - conformal_scores: 1D numpy array of shape (n_samples,), LAC conformal probs for each sample\n", " \"\"\"\n", " conformal_scores = 1 - probs[np.arange(probs.shape[0]), true_labels]\n", " return conformal_scores\n", "\n", "def aps_conformal_score(probs: NDArray, true_labels: 
NDArray):\n", " \"\"\"\n", " Compute the APS conformal score for a batch of softmax score vectors and true labels.\n", "\n", " Parameters:\n", " - probs: 2D numpy array of shape (n_samples, num_classes), softmax probs for each sample\n", " - true_labels: 1D numpy array of shape (n_samples,), true class labels for each sample\n", "\n", " Returns:\n", " - conformal_scores: 1D numpy array of shape (n_samples,), APS conformal probs for each sample\n", " \"\"\"\n", " # Create a mask for each sample: probs >= true_score\n", " true_scores = probs[np.arange(probs.shape[0]), true_labels]\n", " mask = probs >= true_scores[:, np.newaxis]\n", " # Sum along the class axis\n", " conformal_scores = np.sum(probs * mask, axis=1)\n", "\n", " return conformal_scores\n", "\n", "def compute_quantile(probs: NDArray, calibration_labels, n: int, type = \"lac\", alpha = 0.1):\n", " if type == \"lac\":\n", " scores = lac_conformal_score(probs, calibration_labels)\n", " elif type == \"aps\":\n", " scores = aps_conformal_score(probs, calibration_labels)\n", " else:\n", " raise AttributeError(f\"type {type} is not supported. Use 'lac' or 'aps'\")\n", "\n", " q_level = np.ceil((n + 1) * (1 - alpha)) / n\n", " return np.quantile(scores, q_level, method=\"higher\")\n", "\n", "def lac_prediction_set(calibration_probs: NDArray, probs: NDArray, calibration_labels: NDArray, alpha = 0.1):\n", " n = calibration_labels.shape[0]\n", " cal_scores = 1 - calibration_probs[np.arange(calibration_probs.shape[0]), calibration_labels]\n", " # Get the score quantile\n", "\n", " q_level = np.ceil((n + 1) * (1 - alpha)) / n\n", " qhat = np.quantile(cal_scores, q_level, method='higher')\n", "\n", " prediction_sets = probs >= (1 - qhat)\n", " return prediction_sets\n", "\n", "def aps_prediction_set(calibration_probs: NDArray, probs: NDArray, calibration_labels: NDArray, alpha = 0.1):\n", " # Get scores. 
calib_X.shape[0] == calib_Y.shape[0] == n\n", " n = calibration_labels.shape[0]\n", " cal_order = calibration_probs.argsort(1)[:,::-1]\n", " # cal_sum = cal_probs[np.arange(n)[:, None], cal_pi].cumsum(axis=1)\n", " cal_sum = np.take_along_axis(calibration_probs, cal_order, axis=1).cumsum(axis=1)\n", " cal_scores = np.take_along_axis(cal_sum, cal_order.argsort(axis=1), axis=1)[range(n),calibration_labels]\n", "\n", " # Get the score quantile\n", " q_level = np.ceil((n + 1) * (1 - alpha)) / n\n", " qhat = np.quantile(cal_scores, q_level, method='higher')\n", "\n", " # Deploy (output=list of length n, each element is tensor of classes)\n", " test_order = probs.argsort(1)[:,::-1]\n", " test_sum = np.take_along_axis(probs,test_order,axis=1).cumsum(axis=1)\n", " prediction_sets = np.take_along_axis(test_sum <= qhat, test_order.argsort(axis=1), axis=1)\n", " return prediction_sets\n", "\n", "def raps_prediction_set(calibration_probs: NDArray, test_probs: NDArray, calibration_labels: NDArray, alpha = 0.1, lam_reg=0.01, k_reg = 5, disallow_zero_sets = False, rand = True):\n", " probs = np.concatenate([calibration_probs, test_probs], axis=0)\n", " k_reg = min(k_reg, probs.shape[1] - 1)\n", " reg_vec = np.array(k_reg * [0,] + (probs.shape[1] - k_reg) * [lam_reg,])[None, :]\n", "\n", " n = calibration_labels.shape[0]\n", " cal_order = calibration_probs.argsort(axis=1)[:,::-1]\n", " cal_sort = np.take_along_axis(calibration_probs, cal_order, axis=1)\n", " cal_sort_reg = cal_sort + reg_vec\n", " cal_true_labels = np.where(cal_order == calibration_labels[:,None])[1]\n", " cal_scores = cal_sort_reg.cumsum(axis=1)[np.arange(n), cal_true_labels] - np.random.rand(n) * cal_sort_reg[np.arange(n), cal_true_labels]\n", "\n", " # Get the score quantile\n", " q_level = np.ceil((n + 1) * (1 - alpha)) / n\n", " qhat = np.quantile(cal_scores, q_level, method='higher')\n", "\n", " n_test = test_probs.shape[0]\n", " test_order = test_probs.argsort(1)[:,::-1]\n", " test_sort = 
np.take_along_axis(test_probs, test_order, axis=1)\n", " test_sort_reg = test_sort + reg_vec\n", " test_srt_reg_cumsum = test_sort_reg.cumsum(axis=1)\n", " indicators = (test_srt_reg_cumsum - np.random.rand(n_test, 1) * test_sort_reg) <= qhat if rand else test_srt_reg_cumsum - test_sort_reg <= qhat\n", "\n", " if disallow_zero_sets: indicators[:,0] = True\n", " prediction_sets = np.take_along_axis(indicators, test_order.argsort(axis=1), axis=1)\n", " return prediction_sets\n", "\n", "def accuracy(y_true, y_pred):\n", " return np.mean(y_true == y_pred)\n", "\n", "def set_size(pred_set):\n", " return np.mean([np.sum(ps) for ps in pred_set])\n", "\n", "def coverage_rate(y_true, pred_set):\n", " return pred_set[np.arange(pred_set.shape[0]), y_true].mean()" ], "metadata": { "id": "evNphyC0kAJx" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "C1VFdL01dffC" }, "source": [ "## Eye Movement" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "gQYXkdVuFnNX" }, "outputs": [], "source": [ "import numpy as np\n", "import torch\n", "import openml\n", "from tabicl import TabICLClassifier\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, roc_auc_score" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "F3ZdlqQUFzdV" }, "outputs": [], "source": [ "dataset = openml.datasets.get_dataset(1044) # or 31 or 40688\n", "\n", "X, y, _, _ = dataset.get_data(\n", " dataset_format=\"dataframe\", target=dataset.default_target_attribute)\n", "\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "yM1CUt2-LahM", "outputId": "c5502c00-9642-420e-d558-9e31fea40212" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using device: cuda\n" ] } ], "source": [ "device = 
\"cuda\" if torch.cuda.is_available() else (\n", " \"mps\" if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available() else \"cpu\"\n", ")\n", "print(\"Using device:\", device)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "gP-R0-oRFPg0" }, "outputs": [], "source": [ "clf = TabICLClassifier(device=device)\n", "clf.fit(X_train, y_train) # this is cheap\n", "proba = clf.predict_proba(X_test) # in-context learning happens here" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "m4fSOHNOLE3_", "outputId": "8925965e-146b-4474-c87f-dcf19579b760" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "ROC AUC: 0.8958693234799394\n", "Accuracy: 0.7340036563071298\n" ] } ], "source": [ "print(\"ROC AUC:\", roc_auc_score(y_test.to_numpy(dtype=int), proba, multi_class='ovo'))\n", "y = np.argmax(proba, axis=1)\n", "y_pred = clf.y_encoder_.inverse_transform(y)\n", "print(\"Accuracy:\", accuracy_score(y_test, y_pred))" ] }, { "cell_type": "markdown", "metadata": { "id": "gfQvbD9BdqJC" }, "source": [ "## Rain in Australia" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 332 }, "id": "2us2QwSIdryV", "outputId": "ebc49be5-9dbf-4c8b-ed92-ed16194d20e2" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using device: cuda\n", "C_train: (93094, 6) object\n", "X_train: (93094, 18) object\n", "X_test : (29092, 18) object\n", "y_train: (93094,) int64\n", "y_test: (29092,) int64\n", "y_test unique: [0 1 2]\n" ] }, { "ename": "AttributeError", "evalue": "'numpy.ndarray' object has no attribute 'to_numpy'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", 
"\u001b[0;32m/tmp/ipython-input-595776624.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mproba\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpipe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict_proba\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# in-context learning happens here\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"ROC AUC:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mroc_auc_score\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_test\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mproba\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmulti_class\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'ovo'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0my_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mclf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Accuracy:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccuracy_score\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_pred\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mAttributeError\u001b[0m: 'numpy.ndarray' object has no attribute 'to_numpy'" ] } ], "source": [ "from typing import Optional\n", "import os, json\n", "import numpy as np\n", "import pandas as pd\n", "import torch\n", "from skrub import TableVectorizer\n", "from tabicl import 
TabICLClassifier\n", "from sklearn.impute import SimpleImputer\n", "from sklearn.pipeline import make_pipeline\n", "from sklearn.preprocessing import OrdinalEncoder\n", "from sklearn.metrics import accuracy_score, roc_auc_score\n", "\n", "DATA_DIR = '/content/MyDrive/MyDrive/Datasets/Rain_in_Australia'\n", "\n", "device = \"cuda\" if torch.cuda.is_available() else (\n", " \"mps\" if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available() else \"cpu\"\n", ")\n", "print(\"Using device:\", device)\n", "\n", "def load(name) -> Optional[np.ndarray]:\n", " p = os.path.join(DATA_DIR, name)\n", " return np.load(p, allow_pickle=True) if os.path.exists(p) else None\n", "\n", "# ---- load arrays ----\n", "C_train, N_train, y_train = load('C_train.npy'), load('N_train.npy'), load('y_train.npy')\n", "C_val, N_val, y_val = load('C_val.npy'), load('N_val.npy'), load('y_val.npy')\n", "C_test, N_test, y_test = load('C_test.npy'), load('N_test.npy'), load('y_test.npy')\n", "\n", "print(\"C_train:\", C_train.shape, C_train.dtype)\n", "# ---- build X by concatenating [C | N] ----\n", "def concat_features(C_part, N_part):\n", " parts = [p for p in (C_part, N_part) if p is not None]\n", " if not parts:\n", " raise ValueError(\"No features found (need at least C_* or N_*).\")\n", " return np.concatenate(parts, axis=1) if len(parts) > 1 else parts[0]\n", "\n", "X_train = concat_features(C_train, N_train)\n", "X_val = concat_features(C_val, N_val) if (C_val is not None or N_val is not None) else None\n", "X_test = concat_features(C_test, N_test)\n", "\n", "print(\"X_train:\", X_train.shape, X_train.dtype)\n", "print(\"X_test :\", X_test.shape, X_test.dtype)\n", "print(\"y_train:\", y_train.shape, y_train.dtype)\n", "print(\"y_test:\", y_test.shape, y_test.dtype)\n", "print(\"y_test unique:\", np.unique(y_test))\n", "\n", "X_train = pd.DataFrame(X_train)\n", "X_test = pd.DataFrame(X_test)\n", "\n", "pipe = make_pipeline(\n", " TableVectorizer(), # Automatically handles 
various data types\n", " TabICLClassifier(device=device)\n", ")\n", "# pipe = TabICLClassifier(device=device)\n", "\n", "pipe.fit(X_train, y_train) # this is cheap\n", "proba = pipe.predict_proba(X_test) # in-context learning happens here\n", "print(\"ROC AUC:\", roc_auc_score(y_test, proba, multi_class='ovo'))\n", "y_pred = pipe.predict(X_test)\n", "print(\"Accuracy:\", accuracy_score(y_test, y_pred))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "uWOT-zrmkUir", "outputId": "d7efdba9-c5ce-4de8-8b51-ff2d404b1c13" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "ROC AUC: 0.8840392885541616\n", "Accuracy: 0.8509212154544205\n" ] } ], "source": [ "print(\"ROC AUC:\", roc_auc_score(y_test, proba, multi_class='ovo'))\n", "y = np.argmax(proba, axis=1)\n", "y_pred = pipe.y_encoder_.inverse_transform(y)\n", "print(\"Accuracy:\", accuracy_score(y_test, y_pred))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "yuukqVGooxwJ" }, "outputs": [], "source": [ "print(\"ROC AUC:\", roc_auc_score(y_test, proba, multi_class='ovr'))\n", "y = np.argmax(proba, axis=1)\n", "y_pred = pipe.y_encoder_.inverse_transform(y)\n", "print(\"Accuracy:\", accuracy_score(y_test, y_pred))" ] }, { "cell_type": "markdown", "metadata": { "id": "MupH1gZPiPph" }, "source": [ "## Banknote auth" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "FZmc0osUiPXr", "outputId": "ef61624e-08d6-4d6e-f8d7-829c25ef3a7b" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Using device: cuda\n", "X_train: (877, 4) float64\n", "X_test : (275, 4) float64\n", "y_train: (877,) int64\n", "y_test: (275,) int64\n", "y_test unique: [0 1]\n" ] } ], "source": [ "DATA_DIR = '/content/MyDrive/MyDrive/Datasets/banknote_authentication'\n", "\n", "device = \"cuda\" if torch.cuda.is_available() else (\n", " \"mps\" 
if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available() else \"cpu\"\n", ")\n", "print(\"Using device:\", device)\n", "\n", "def load(name) -> Optional[np.ndarray]:\n", " p = os.path.join(DATA_DIR, name)\n", " return np.load(p, allow_pickle=True) if os.path.exists(p) else None\n", "\n", "# ---- load arrays ----\n", "N_train, y_train = load('N_train.npy'), load('y_train.npy')\n", "N_val, y_val = load('N_val.npy'), load('y_val.npy')\n", "N_test, y_test = load('N_test.npy'), load('y_test.npy')\n", "\n", "X_train = N_train\n", "X_val = N_val\n", "X_test = N_test\n", "\n", "print(\"X_train:\", X_train.shape, X_train.dtype)\n", "print(\"X_test :\", X_test.shape, X_test.dtype)\n", "print(\"y_train:\", y_train.shape, y_train.dtype)\n", "print(\"y_test:\", y_test.shape, y_test.dtype)\n", "print(\"y_test unique:\", np.unique(y_test))\n", "\n", "X_train = pd.DataFrame(X_train)\n", "X_test = pd.DataFrame(X_test)\n", "\n", "# pipe = make_pipeline(\n", "# TableVectorizer(), # Automatically handles various data types\n", "# TabICLClassifier(device=device)\n", "# )\n", "pipe = TabICLClassifier(device=device)\n", "\n", "pipe.fit(X_train, y_train) # this is cheap\n", "cal_proba = pipe.predict_proba(X_val) # in-context learning happens here\n", "proba = pipe.predict_proba(X_test) # in-context learning happens here" ] }, { "cell_type": "code", "source": [ "lac_pred_set = lac_prediction_set(cal_proba, proba, y_val)\n", "aps_pred_set = aps_prediction_set(cal_proba, proba, y_val)\n", "raps_pred_set = raps_prediction_set(cal_proba, proba, y_val)" ], "metadata": { "id": "vvGP7d7xkpk7" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "print(\"ROC AUC:\", roc_auc_score(y_test, proba[:,1]))\n", "y = np.argmax(proba, axis=1)\n", "y_pred = pipe.y_encoder_.inverse_transform(y)\n", "print(\"Accuracy:\", accuracy_score(y_test, y_pred))\n", "print(\"SS (LAC):\", set_size(lac_pred_set))\n", "print(\"SS (APS):\", set_size(aps_pred_set))\n", 
"print(\"SS (RAPS):\", set_size(raps_pred_set))\n", "print(\"CR (LAC):\", coverage_rate(y_test, lac_pred_set))\n", "print(\"CR (APS):\", coverage_rate(y_test, aps_pred_set))\n", "print(\"CR (RAPS):\", coverage_rate(y_test, raps_pred_set))" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Sp87-tIbkheD", "outputId": "ad366dee-6cbf-4c4c-89b7-7754d1c5ea59" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "ROC AUC: 0.5084913746919533\n", "Accuracy: 0.5563636363636364\n", "SS (LAC): 1.8254545454545454\n", "SS (APS): 2.0\n", "SS (RAPS): 1.8581818181818182\n", "CR (LAC): 0.9236363636363636\n", "CR (APS): 1.0\n", "CR (RAPS): 0.9418181818181818\n" ] } ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "T4", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 0 }