Spaces: Runtime error
Commit · 16e1dde
Parent(s): a43ccba

Model added

Files changed:
- app.py (+20 -4)
- model_mnist.pth (+0 -0)
- train.py (+89 -0)
app.py
CHANGED
@@ -1,7 +1,23 @@
 import gradio as gr
+from PIL import Image, ImageOps
+from torchvision import transforms
+import numpy as np
 
-
-    return "Hello " + name + "!!"
+labels = ['Zero','Um','Dois','Três','Quatro','Cinco','Seis','Sete','Oito', 'Nove']
 
-
-
+# LOADING MODEL
+model.load_state_dict(torch.load("model_mnist.pth", map_location=torch.device('cuda')))
+
+
+def predict(input):
+    input = torch.from_numpy(input.reshape(1, 1, 28, 28)).to(dtype=torch.float32, device=device)
+
+    with torch.no_grad():
+        outputs = model(input)
+        prediction = torch.nn.functional.softmax(outputs[0], dim=0)
+        confidences = {labels[i]: float(prediction[i]) for i in range(10)}
+    return confidences
+
+gr.Interface(title='Classificador de dígitos', fn=predict,
+             inputs="sketchpad",
+             outputs=gr.Label(num_top_classes=3)).launch(share=True, debug=True)
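As committed, app.py references torch, model, and device without importing torch, defining a device, or constructing a model to load the weights into, and it maps the checkpoint to 'cuda' even though free Spaces hardware is CPU-only; any of these will fail at startup, which matches the "Runtime error" status above. A minimal sketch of the missing pieces, assuming the LeNet definition from train.py below is duplicated in app.py and the Space runs on CPU (the class copy and the CPU device are assumptions, not part of this commit):

import torch
import torch.nn as nn

# Copy of the LeNet architecture from train.py; the saved state_dict only
# contains weights, so the matching class definition must exist at load time.
class LeNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.convs = nn.Sequential(
            nn.Conv2d(1, 4, kernel_size=5), nn.Tanh(), nn.AvgPool2d(2, 2),
            nn.Conv2d(4, 12, kernel_size=5), nn.Tanh(), nn.AvgPool2d(2, 2),
        )
        # 28x28 input -> 24 -> 12 -> 8 -> 4 after the two conv+pool stages,
        # hence 4*4*12 = 192 features into the classifier.
        self.linear = nn.Sequential(nn.Linear(4 * 4 * 12, 10))

    def forward(self, x):
        return self.linear(torch.flatten(self.convs(x), 1))

device = torch.device("cpu")  # assumption: CPU-only Space hardware
model = LeNet().to(device)
model.load_state_dict(torch.load("model_mnist.pth", map_location=device))
model.eval()  # inference mode before predict() is called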
model_mnist.pth
ADDED
Binary file (15.3 kB)
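Since train.py saves only model_adam.state_dict(), this file is a bare weight dictionary with no architecture attached; the 15.3 kB size is consistent with LeNet's roughly 3.2k float32 parameters plus serialization overhead. An optional way to inspect it (assuming the file sits in the working directory):

import torch

# Load the raw state_dict on CPU and list each parameter tensor's shape.
state = torch.load("model_mnist.pth", map_location="cpu")
for name, tensor in state.items():
    print(name, tuple(tensor.shape))
# Expect the two Conv2d layers from `convs` and the Linear(4*4*12, 10) layer.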
train.py
ADDED
@@ -0,0 +1,89 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision
+import torchvision.transforms as transforms
+import matplotlib.pyplot as plt
+
+
+if torch.cuda.is_available():
+    device = torch.device("cuda:0")
+    print("GPU")
+else:
+    device = torch.device("cpu")
+    print("CPU")
+
+# MNIST dataset
+batch_size = 64
+
+train_dataset = torchvision.datasets.MNIST(root='./data',
+                                           train=True,
+                                           transform=transforms.ToTensor(),
+                                           download=True)
+
+test_dataset = torchvision.datasets.MNIST(root='./data',
+                                          train=False,
+                                          transform=transforms.ToTensor())
+
+# Data loader
+train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
+                                           batch_size=batch_size,
+                                           shuffle=True)
+
+test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
+                                          batch_size=batch_size,
+                                          shuffle=False)
+
+
+# NEURAL NETWORK
+class LeNet(nn.Module):
+    def __init__(self):
+        super(LeNet, self).__init__()
+
+        self.convs = nn.Sequential(
+            nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(5, 5)),
+            nn.Tanh(),
+            nn.AvgPool2d(2, 2),
+
+            nn.Conv2d(in_channels=4, out_channels=12, kernel_size=(5, 5)),
+            nn.Tanh(),
+            nn.AvgPool2d(2, 2)
+        )
+
+        self.linear = nn.Sequential(
+            nn.Linear(4*4*12, 10)
+        )
+
+    def forward(self, x):
+        x = self.convs(x)
+        x = torch.flatten(x, 1)
+
+        return self.linear(x)
+
+
+
+# TRAIN PARAMETERS
+criterion = nn.CrossEntropyLoss()
+model_adam = LeNet().to(device)
+optimizer = torch.optim.Adam(model_adam.parameters(), lr=0.05)
+n_steps = len(train_loader)
+num_epochs = 10
+
+# TRAIN
+def train(model):
+    for epoch in range(num_epochs):
+        for i, (images, labels) in enumerate(train_loader):
+
+            images = images.to(device)
+            labels = labels.to(device)
+
+            # Forward pass
+            outputs = model(images)
+            loss = criterion(outputs, labels)
+
+            # Backward and optimize
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+torch.save(model_adam.state_dict(), "model_mnist.pth")
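As committed, train.py defines train() but does not appear to call it before torch.save, so the checkpoint would hold the randomly initialized weights; note also that lr=0.05 is unusually high for Adam, whose common default is 1e-3. A hedged sketch of how the end of the script could be wired up, with an optional accuracy check on the test split (the evaluation loop is an addition, not part of the commit):

train(model_adam)  # run the training loop defined above before saving

# Optional: accuracy on the MNIST test split using the existing test_loader.
model_adam.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        predicted = model_adam(images).argmax(dim=1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print(f"Test accuracy: {correct / total:.2%}")

torch.save(model_adam.state_dict(), "model_mnist.pth")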