|
import torch |
|
|
|
def check_gpu() -> "torch.device":
    """Print CUDA/GPU diagnostics and run a small matmul sanity check.

    Reports whether CUDA is available; if so, prints the GPU name and
    total memory, then multiplies a random 1000x1000 matrix by itself on
    the selected device to confirm computation works.

    Returns:
        torch.device: the device actually used ("cuda" or "cpu"), so
        callers can branch on the outcome. (Previously returned None;
        returning the device is backward-compatible.)
    """
    # NOTE(review): the original file's emoji were mojibake-garbled and two
    # string literals were split mid-line (a syntax error); the messages
    # below are reconstructed — confirm against the intended originals.
    print("🔍 Checking CUDA and GPU details...\n")

    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("✅ CUDA is available.")
        print(f"🖥️ GPU Name: {torch.cuda.get_device_name(0)}")
        # total_memory is in bytes; convert to GiB for display.
        total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
        print(f"📦 GPU Memory: {round(total_gb, 2)} GB")

        # Allocate directly on the device (avoids a CPU alloc + copy).
        x = torch.rand(1000, 1000, device=device)
        y = torch.mm(x, x)
        print(f"🚀 Tensor computation successful on GPU! Tensor shape: {y.shape}")
    else:
        device = torch.device("cpu")
        print("❌ CUDA is NOT available. Using CPU fallback.")
        x = torch.rand(1000, 1000)
        y = torch.mm(x, x)
        print(f"✅ CPU computation done. Tensor shape: {y.shape}")

    return device
|
|
|
# Script entry point: run the GPU diagnostic only when executed directly,
# not when this module is imported.
if __name__ == "__main__":

    check_gpu()
|
|