{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ellNFnP7f2Wx", "outputId": "3adb85e1-f41a-433f-bd77-f1301abb7731" }, "outputs": [], "source": [
"import os\n",
"# Must be set BEFORE torch initializes CUDA so only GPU 0 is visible.\n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
"\n",
"# stdlib\n",
"import time\n",
"from datetime import datetime\n",
"\n",
"# third-party\n",
"import psutil\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"from tqdm.auto import tqdm, trange\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
"from torch.utils.data import DataLoader\n",
"from torch.utils.checkpoint import checkpoint\n",
"import torchvision as tv\n",
"from torchvision import datasets, transforms\n",
"from torchvision.transforms import v2\n",
"# device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n",
"# print(device)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [
"# Rolling logs of utilization samples; appended to by update_utilization_lists().\n",
"timestamps = []\n",
"cpu_ram_mb = []\n",
"cpu_ram_percent = []\n",
"gpu_ram_mb = []\n",
"gpu_ram_percent = []\n",
"\n",
"\n",
"\n",
"# --- System Utilization ---------------------------------------------------------------------------\n",
"def get_system_utilization():\n",
"    \"\"\"Take one snapshot of current CPU and GPU RAM usage.\n",
"\n",
"    Returns:\n",
"        dict with keys 'time' (str, \"%Y-%m-%d %H:%M:%S\"),\n",
"        'cpu_ram_utilization_mb', 'cpu_ram_percentage',\n",
"        'gpu_ram_utilization_mb', 'gpu_ram_percentage'.\n",
"        The two GPU fields are None when CUDA is unavailable.\n",
"    \"\"\"\n",
"    current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n",
"\n",
"    cpu_ram = psutil.virtual_memory()\n",
"    cpu_ram_utilization_mb = cpu_ram.used / (1024 * 1024)\n",
"    cpu_ram_percentage = cpu_ram.percent\n",
"\n",
"    gpu_ram_utilization_mb = None\n",
"    gpu_ram_percentage = None\n",
"    if torch.cuda.is_available():\n",
"        # memory_allocated() counts tensors only, not the caching allocator's\n",
"        # reserved pool, so the percentage understates true device usage.\n",
"        gpu_ram_utilization_bytes = torch.cuda.memory_allocated()\n",
"        gpu_ram_utilization_mb = gpu_ram_utilization_bytes / (1024 * 1024)\n",
"        # Device index 0 is the only visible device given CUDA_VISIBLE_DEVICES=\"0\".\n",
"        gpu_ram_total_bytes = torch.cuda.get_device_properties(0).total_memory\n",
"        gpu_ram_percentage = (gpu_ram_utilization_bytes / gpu_ram_total_bytes) * 100\n",
"\n",
"    return {\n",
"        \"time\": current_time,\n",
"        \"cpu_ram_utilization_mb\": cpu_ram_utilization_mb,\n",
"        \"cpu_ram_percentage\": cpu_ram_percentage,\n",
"        \"gpu_ram_utilization_mb\": gpu_ram_utilization_mb,\n",
"        \"gpu_ram_percentage\": gpu_ram_percentage\n",
"    }\n",
"\n",
"\n",
"\n",
"def update_utilization_lists():\n",
"    \"\"\"Append the current utilization snapshot to the module-level log lists.\n",
"\n",
"    No `global` statement is needed: the lists are mutated in place via\n",
"    .append(), never rebound.\n",
"    \"\"\"\n",
"    utilization = get_system_utilization()\n",
"\n",
"    timestamps.append(utilization[\"time\"])\n",
"    cpu_ram_mb.append(utilization[\"cpu_ram_utilization_mb\"])\n",
"    cpu_ram_percent.append(utilization[\"cpu_ram_percentage\"])\n",
"    gpu_ram_mb.append(utilization[\"gpu_ram_utilization_mb\"])\n",
"    gpu_ram_percent.append(utilization[\"gpu_ram_percentage\"])" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", " | image_path | \n", "id | \n", "
---|---|---|
0 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "013672.jpg | \n", "
1 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "055563.jpg | \n", "
2 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "145628.jpg | \n", "
3 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "192398.jpg | \n", "
4 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "049887.jpg | \n", "
... | \n", "... | \n", "... | \n", "
8099 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "010392.jpg | \n", "
8100 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "099050.jpg | \n", "
8101 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "048895.jpg | \n", "
8102 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "142243.jpg | \n", "
8103 | \n", "/home/23m1521/datasets/CelebA/img_align_celeba... | \n", "034314.jpg | \n", "
8104 rows × 2 columns
\n", "