{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "e5e0f994" }, "source": [ "# 🚀 Baseline XGBoost for Resource Estimation of CNNs (Keras Applications)\n", "This notebook demonstrates how to use XGBoost for predicting resource usage (like fit time) of CNN models based on dataset features." ] }, { "cell_type": "markdown", "metadata": { "id": "275c013b" }, "source": [ "## 1️⃣ Setup and Installation\n", "Ensure required libraries are installed." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "DPbLUZKvRtwx", "outputId": "d65bcfd7-a615-4b74-feb6-757456f42581" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Found existing installation: scikit-learn 1.6.1\n", "Uninstalling scikit-learn-1.6.1:\n", " Successfully uninstalled scikit-learn-1.6.1\n", "Collecting scikit-learn==1.5.2\n", " Downloading scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (13 kB)\n", "Requirement already satisfied: numpy>=1.19.5 in /usr/local/lib/python3.11/dist-packages (from scikit-learn==1.5.2) (2.0.2)\n", "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn==1.5.2) (1.14.1)\n", "Requirement already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn==1.5.2) (1.4.2)\n", "Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn==1.5.2) (3.6.0)\n", "Downloading scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (13.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.3/13.3 MB\u001b[0m \u001b[31m34.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: scikit-learn\n", "Successfully installed scikit-learn-1.5.2\n" ] } ], "source": [ "!pip uninstall -y scikit-learn\n", "!pip install scikit-learn==1.5.2" ] }, { "cell_type": "markdown", "metadata": { "id": "48b0b5f0" }, "source": [ "## 2️⃣ Import Libraries\n", "Import all necessary Python libraries for data handling, modeling, and visualization." ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "id": "V23vhp8o9YHM" }, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import mean_squared_error\n", "from xgboost import XGBRegressor\n", "import joblib" ] }, { "cell_type": "markdown", "metadata": { "id": "107733d4" }, "source": [ "## 3️⃣ Data Loading & Preprocessing\n", "Load the dataset and perform basic preprocessing to prepare for modeling." 
] }, { "cell_type": "code", "execution_count": 3, "metadata": { "id": "UoYmjX7NGVVD" }, "outputs": [], "source": [ "def calculate_mspe_rmspe(y_true, y_pred):\n", " mape = np.mean(np.abs((y_true - y_pred) / (y_true)), axis=0) * 100\n", " mspe = np.mean(((y_true - y_pred) / y_true) ** 2, axis=0) * 100 # MSPE for each column\n", " rmspe = np.sqrt(mspe) # RMSPE for each column\n", " return mape, mspe, rmspe\n", "\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "CmmE7SNz-KXJ", "outputId": "dc55b8bf-2000-4954-b231-664d715851de" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "Index(['name', 'samples', 'input_dim_w', 'input_dim_h', 'input_dim_c',\n", " 'output_dim', 'optimizer', 'epochs', 'batch', 'learn_rate',\n", " 'tf_version', 'cuda_version', 'batch_time', 'epoch_time', 'fit_time',\n", " 'npz_path', 'gpu_make', 'gpu_name', 'gpu_arch', 'gpu_cc',\n", " 'gpu_core_count', 'gpu_sm_count', 'gpu_memory_size', 'gpu_memory_type',\n", " 'gpu_memory_bw', 'gpu_tensor_core_count', 'max_memory_util',\n", " 'avg_memory_util', 'max_gpu_util', 'avg_gpu_util', 'max_gpu_temp',\n", " 'avg_gpu_temp'],\n", " dtype='object')" ] }, "metadata": {}, "execution_count": 7 } ], "source": [ "# Load data\n", "# Assuming the data is in a CSV file with the target column 'fit_time_in_TF'\n", "data_path = 'dataset-new.csv' # Replace with the actual path to your dataset\n", "df = pd.read_csv(data_path)\n", "# Another way is to directly load the data from the hugging face where the dataset is hosted\n", "# url = 'https://huggingface.co/datasets/ICICLE-AI/ResourceEstimation_HLOGenCNN/resolve/main/dataset-new.csv'\n", "# df = pd.read_csv(url)\n", "df.columns" ] }, { "cell_type": "markdown", "metadata": { "id": "962d5030" }, "source": [ "## 4️⃣ Feature Engineering\n", "Extract relevant features and clean the dataset." ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "OGzq5lrIHh2R", "outputId": "9ee9eabd-7363-451a-b379-013b1aa7688d" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ " name unit_name\n", "0 MobileNet_architecture_optadam_s1_ipd224x224x3... MobileNet\n", "1 MobileNet_architecture_optadam_s1_ipd224x224x3... MobileNet\n", "2 MobileNet_architecture_optadam_s1_ipd224x224x3... MobileNet\n", "3 MobileNet_architecture_optadam_s1_ipd224x224x3... MobileNet\n", "4 MobileNet_architecture_optadam_s1_ipd224x224x3... 
MobileNet\n" ] } ], "source": [ "# Extract substring before the first underscore\n", "df['unit_name'] = df['name'].str.split('_').str[0]\n", "\n", "# Display the updated DataFrame\n", "print(df[['name', 'unit_name']].head())" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "mw1fY7vM-fCw", "outputId": "a8903a63-83f3-4b6f-b061-4a4bb900cd1d" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "Label Mapping: {'DenseNet121': 0, 'DenseNet169': 1, 'DenseNet201': 2, 'EfficientNetB0': 3, 'EfficientNetB1': 4, 'EfficientNetB7': 5, 'InceptionV3': 6, 'MobileNet': 7, 'MobileNetV2': 8, 'NASNetLarge': 9, 'NASNetMobile': 10, 'ResNet101': 11, 'ResNet152': 12, 'ResNet50': 13, 'VGG16': 14, 'VGG19': 15, 'Xception': 16}\n" ] } ], "source": [ "df = df.dropna() # Dropping rows with missing values (you can customize this)\n", "\n", "from sklearn.preprocessing import LabelEncoder\n", "label_encoder = LabelEncoder()\n", "# Transform the categorical column\n", "df['unit_name_encoded'] = label_encoder.fit_transform(df['unit_name'])\n", "# Optional: Mapping of encoded labels to original categories\n", "mapping = dict(zip(label_encoder.classes_, range(len(label_encoder.classes_))))\n", "print(\"\\nLabel Mapping:\", mapping)\n", "\n", "df = df.drop(columns=['name', 'npz_path', 'unit_name'])\n", "# Convert categorical features to numeric (if any)\n", "df = pd.get_dummies(df, drop_first=True)\n" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 290 }, "id": "04XJeqln-g4n", "outputId": "f63e3507-6770-41be-dc9c-b03a3cb232a6" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " samples input_dim_w input_dim_h input_dim_c output_dim epochs batch \\\n", "0 1 224 224 3 10 1 1 \n", "1 1 224 224 3 10 1 1 \n", "2 1 224 224 3 10 1 1 \n", "3 1 224 224 3 10 2 1 \n", "4 1 224 224 3 10 2 1 \n", "\n", " learn_rate cuda_version batch_time ... max_gpu_util avg_gpu_util \\\n", "0 0.0100 12.2 22.07 ... 13.0 0.51 \n", "1 0.0010 12.2 18.44 ... 100.0 2.92 \n", "2 0.0001 12.2 18.78 ... 26.0 0.86 \n", "3 0.0100 12.2 9.38 ... 28.0 1.78 \n", "4 0.0010 12.2 9.30 ... 100.0 3.41 \n", "\n", " max_gpu_temp avg_gpu_temp unit_name_encoded optimizer_sgd \\\n", "0 25.0 25.00 7 False \n", "1 26.0 25.84 7 False \n", "2 26.0 26.00 7 False \n", "3 27.0 26.04 7 False \n", "4 27.0 26.55 7 False \n", "\n", " gpu_name_Tesla P100-PCIE-16GB gpu_name_Tesla V100S-PCIE-32GB \\\n", "0 True False \n", "1 True False \n", "2 True False \n", "3 True False \n", "4 True False \n", "\n", " gpu_arch_Tesla gpu_memory_type_hbm2e \n", "0 True False \n", "1 True False \n", "2 True False \n", "3 True False \n", "4 True False \n", "\n", "[5 rows x 30 columns]" ], "text/html": [ "\n", "
\n", " | samples | \n", "input_dim_w | \n", "input_dim_h | \n", "input_dim_c | \n", "output_dim | \n", "epochs | \n", "batch | \n", "learn_rate | \n", "cuda_version | \n", "batch_time | \n", "... | \n", "max_gpu_util | \n", "avg_gpu_util | \n", "max_gpu_temp | \n", "avg_gpu_temp | \n", "unit_name_encoded | \n", "optimizer_sgd | \n", "gpu_name_Tesla P100-PCIE-16GB | \n", "gpu_name_Tesla V100S-PCIE-32GB | \n", "gpu_arch_Tesla | \n", "gpu_memory_type_hbm2e | \n", "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | \n", "1 | \n", "224 | \n", "224 | \n", "3 | \n", "10 | \n", "1 | \n", "1 | \n", "0.0100 | \n", "12.2 | \n", "22.07 | \n", "... | \n", "13.0 | \n", "0.51 | \n", "25.0 | \n", "25.00 | \n", "7 | \n", "False | \n", "True | \n", "False | \n", "True | \n", "False | \n", "
1 | \n", "1 | \n", "224 | \n", "224 | \n", "3 | \n", "10 | \n", "1 | \n", "1 | \n", "0.0010 | \n", "12.2 | \n", "18.44 | \n", "... | \n", "100.0 | \n", "2.92 | \n", "26.0 | \n", "25.84 | \n", "7 | \n", "False | \n", "True | \n", "False | \n", "True | \n", "False | \n", "
2 | \n", "1 | \n", "224 | \n", "224 | \n", "3 | \n", "10 | \n", "1 | \n", "1 | \n", "0.0001 | \n", "12.2 | \n", "18.78 | \n", "... | \n", "26.0 | \n", "0.86 | \n", "26.0 | \n", "26.00 | \n", "7 | \n", "False | \n", "True | \n", "False | \n", "True | \n", "False | \n", "
3 | \n", "1 | \n", "224 | \n", "224 | \n", "3 | \n", "10 | \n", "2 | \n", "1 | \n", "0.0100 | \n", "12.2 | \n", "9.38 | \n", "... | \n", "28.0 | \n", "1.78 | \n", "27.0 | \n", "26.04 | \n", "7 | \n", "False | \n", "True | \n", "False | \n", "True | \n", "False | \n", "
4 | \n", "1 | \n", "224 | \n", "224 | \n", "3 | \n", "10 | \n", "2 | \n", "1 | \n", "0.0010 | \n", "12.2 | \n", "9.30 | \n", "... | \n", "100.0 | \n", "3.41 | \n", "27.0 | \n", "26.55 | \n", "7 | \n", "False | \n", "True | \n", "False | \n", "True | \n", "False | \n", "
5 rows × 30 columns
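, { "cell_type": "markdown", "metadata": {}, "source": [ "An optional check (an illustrative addition, not part of the original run) confirms that one-hot encoding left no non-numeric columns behind before training:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional check: after get_dummies, no object-typed columns should remain.\n", "assert df.select_dtypes(include=['object']).empty, 'Unexpected non-numeric columns'\n", "df.dtypes.value_counts()" ] }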
\n", "XGBRegressor(base_score=None, booster=None, callbacks=None,\n", " colsample_bylevel=None, colsample_bynode=None,\n", " colsample_bytree=None, device=None, early_stopping_rounds=None,\n", " enable_categorical=False, eval_metric=None, feature_types=None,\n", " gamma=None, grow_policy=None, importance_type=None,\n", " interaction_constraints=None, learning_rate=0.1, max_bin=None,\n", " max_cat_threshold=None, max_cat_to_onehot=None,\n", " max_delta_step=None, max_depth=6, max_leaves=None,\n", " min_child_weight=None, missing=nan, monotone_constraints=None,\n", " multi_strategy=None, n_estimators=100, n_jobs=None,\n", " num_parallel_tree=None, random_state=42, ...)In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "XGBRegressor(base_score=None, booster=None, callbacks=None,\n", " colsample_bylevel=None, colsample_bynode=None,\n", " colsample_bytree=None, device=None, early_stopping_rounds=None,\n", " enable_categorical=False, eval_metric=None, feature_types=None,\n", " gamma=None, grow_policy=None, importance_type=None,\n", " interaction_constraints=None, learning_rate=0.1, max_bin=None,\n", " max_cat_threshold=None, max_cat_to_onehot=None,\n", " max_delta_step=None, max_depth=6, max_leaves=None,\n", " min_child_weight=None, missing=nan, monotone_constraints=None,\n", " multi_strategy=None, n_estimators=100, n_jobs=None,\n", " num_parallel_tree=None, random_state=42, ...)