Boboiazumi committed on
Commit 702796f (verified)
1 Parent(s): b2e8a4e

Update app.py

Files changed (1)
  1. app.py +50 -50
app.py CHANGED
@@ -1,51 +1,51 @@
- import gradio as gr
- import torch
- import torchvision.models as models
- from torchvision import transforms
- from torch import nn
- from PIL import Image
-
- transform = transforms.Compose([
-     transforms.Resize((128, 128)),
-     transforms.ToTensor()
- ])
-
- model = models.mobilenet_v2()
- num_ftrs = model.classifier[1].in_features
- model.classifier[1] = nn.Linear(num_ftrs, 2)
- model = model.to("cpu")
- model.load_state_dict(torch.load("cnn_model.pth", weights_only=True, map_location="cpu"))
- model.eval()
-
- label = ["nsfw", "safe"]
-
- def inference(image):
-     image = transform(image).unsqueeze(0)
-
-     with torch.no_grad():
-         output = model(image)
-         output = torch.nn.functional.softmax(output, dim=1)
-
-     predicted_class = torch.argmax(output, dim=1).item()
-     score = output[0][predicted_class]
-
-     if label[predicted_class] == "nsfw":
-         output = f'Boneka ini terlalu seksi dan tidak aman dilihat anak kecil (NSFW) [{label[predicted_class]}:{score}]'
-     else:
-         output = f'Boneka ini aman (SAFE) [{label[predicted_class]}:{score}]'
-
-     return output
-
- with gr.Blocks() as demo:
-     with gr.Row():
-         with gr.Column():
-             inputs = gr.Image(type="pil")
-         with gr.Column():
-             btn = gr.Button("Cek")
-             pred = gr.Text(label="Prediction")
-
-     btn.click(fn=inference, inputs=inputs, outputs=pred)
-
-
-
+ import gradio as gr
+ import torch
+ import torchvision.models as models
+ from torchvision import transforms
+ from torch import nn
+ from PIL import Image
+
+ transform = transforms.Compose([
+     transforms.Resize((128, 128)),
+     transforms.ToTensor()
+ ])
+
+ model = models.resnet18()
+ num_ftrs = model.fc.in_features
+ model.fc = nn.Linear(num_ftrs, 2)
+ model = model.to("cpu")
+ model.load_state_dict(torch.load("cnn_model.pth", weights_only=True, map_location="cpu"))
+ model.eval()
+
+ label = ["nsfw", "safe"]
+
+ def inference(image):
+     image = transform(image).unsqueeze(0)
+
+     with torch.no_grad():
+         output = model(image)
+         output = torch.nn.functional.softmax(output, dim=1)
+
+     predicted_class = torch.argmax(output, dim=1).item()
+     score = output[0][predicted_class]
+
+     if label[predicted_class] == "nsfw":
+         output = f'Boneka ini terlalu seksi dan tidak aman dilihat anak kecil (NSFW) [{label[predicted_class]}:{score}]'
+     else:
+         output = f'Boneka ini aman (SAFE) [{label[predicted_class]}:{score}]'
+
+     return output
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             inputs = gr.Image(type="pil")
+         with gr.Column():
+             btn = gr.Button("Cek")
+             pred = gr.Text(label="Prediction")
+
+     btn.click(fn=inference, inputs=inputs, outputs=pred)
+
+
+
  demo.queue().launch()
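
The functional change in this commit is the backbone swap: the MobileNetV2 classifier head (model.classifier[1]) is replaced by ResNet-18's fully connected head (model.fc), both resized to two output classes matching label = ["nsfw", "safe"]; the preprocessing, inference logic, and Gradio UI are otherwise unchanged. A minimal sketch of the two head replacements, assuming the same 2-class setup as app.py (untrained weights here, for illustration only, not the committed cnn_model.pth):

# Sketch comparing the old and new classifier heads from this commit.
# Assumes the same 2-class (nsfw/safe) setup as app.py; weights are untrained.
import torch
from torch import nn
import torchvision.models as models

# Old backbone: MobileNetV2 -- the final Linear inside the classifier Sequential is replaced.
old_model = models.mobilenet_v2()
old_model.classifier[1] = nn.Linear(old_model.classifier[1].in_features, 2)

# New backbone: ResNet-18 -- the single fully connected head is replaced.
new_model = models.resnet18()
new_model.fc = nn.Linear(new_model.fc.in_features, 2)

# Both heads now map backbone features to 2 logits for the 128x128 RGB input used by the app.
dummy = torch.randn(1, 3, 128, 128)
with torch.no_grad():
    print(old_model(dummy).shape, new_model(dummy).shape)  # torch.Size([1, 2]) for both

For reference, the Indonesian UI strings are untouched by the commit: the button label "Cek" means "Check", "Boneka ini terlalu seksi dan tidak aman dilihat anak kecil" roughly translates to "This doll is too sexy and not safe for small children to see", and "Boneka ini aman" to "This doll is safe".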