import gradio
from transformers import AutoImageProcessor, MobileNetV2Model
import torch

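# Load the image processor and the MobileNetV2 feature backbone from the Hugging Face Hub.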
image_processor = AutoImageProcessor.from_pretrained("Aruno/gemini-beauty")
model = MobileNetV2Model.from_pretrained("Aruno/gemini-beauty")

def inference(img):
    # Preprocess the uploaded image and run a forward pass without tracking gradients.
    inputs = image_processor(img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # MobileNetV2Model is a bare feature backbone, so return the pooled
    # feature vector as a plain list rather than an image.
    return outputs.pooler_output.squeeze().tolist()

iface = gradio.Interface(
  fn=inference,
  inputs='image',
  outputs='json',  # the pooled feature vector is displayed as JSON, not as an image
  title='Hello World',
  description='The simplest interface!',
  examples=[])

iface.launch()