import os
import gradio as gr
import requests
import json
from PIL import Image
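# NOTE: the RapidAPI key is read from the API_KEY environment variable when a request is
# made; if it is not set, the request goes out without a valid key and the API will
# presumably reject it.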

def compare_face(frame1, frame2):
    """Send both images to the Recognito face API and return the raw result plus face crops."""
    url = "https://recognito.p.rapidapi.com/api/face"
    headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}

    # Open the uploads in a context manager so the file handles are closed after the request.
    with open(frame1, 'rb') as f1, open(frame2, 'rb') as f2:
        files = {'image1': f1, 'image2': f2}
        r = requests.post(url=url, files=files, headers=headers)

    # Initialise the crops up front so the final return works even if parsing fails below.
    face1 = None
    face2 = None
    faces = None

    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)

        res1 = r.json().get('image1')
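        # Assumed response layout, inferred from the fields read below rather than from
        # official API docs:
        #   {"image1": {"detection": {"x": .., "y": .., "w": .., "h": ..}, ...}, "image2": {...}}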
        if res1 is not None:
            face = res1.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')
            # Clamp the detection box to the image bounds.
            x1 = max(x1, 0)
            y1 = max(y1, 0)
            x2 = min(x2, image1.width - 1)
            y2 = min(y2, image1.height - 1)

            # Crop the face and scale it to a 150 px tall thumbnail, keeping the aspect ratio.
            face1 = image1.crop((x1, y1, x2, y2))
            face_image_ratio = face1.width / float(face1.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face1 = face1.resize((resized_w, resized_h))

        res2 = r.json().get('image2')
        if res2 is not None:
            face = res2.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')

            # Clamp the detection box to the image bounds.
            x1 = max(x1, 0)
            y1 = max(y1, 0)
            x2 = min(x2, image2.width - 1)
            y2 = min(y2, image2.height - 1)

            # Crop the face and scale it to a 150 px tall thumbnail, keeping the aspect ratio.
            face2 = image2.crop((x1, y1, x2, y2))
            face_image_ratio = face2.width / float(face2.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face2 = face2.resize((resized_w, resized_h))
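
        # The triple-quoted block below is intentionally disabled: it would paste the two crops
        # side by side into one composite image instead of returning them separately to the gallery.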
        '''
        if face1 is not None and face2 is not None:
            new_image = Image.new('RGB',(face1.width + face2.width + 10, 150), (80,80,80))

            new_image.paste(face1,(0,0))
            new_image.paste(face2,(face1.width + 10, 0))
            faces = new_image.copy()
        elif face1 is not None and face2 is None:
            new_image = Image.new('RGB',(face1.width + face1.width + 10, 150), (80,80,80))

            new_image.paste(face1,(0,0))
            faces = new_image.copy()
        elif face1 is None and face2 is not None:
            new_image = Image.new('RGB',(face2.width + face2.width + 10, 150), (80,80,80))

            new_image.paste(face2,(face2.width + 10, 0))
            faces = new_image.copy()
        '''
    except Exception:
        # If the response cannot be parsed or a crop fails, just return the raw JSON and no crops.
        pass

    # Return the raw API response plus whatever face crops were produced for the gallery.
    return [r.json(), [face for face in (face1, face2) if face is not None]]

with gr.Blocks(theme='aliabid94/new-theme') as demo:
    '''
    demo.load(
        None,
        None,
        js="""
        () => {
        const params = new URLSearchParams(window.location.search);
        if (!params.has('__theme')) {
            params.set('__theme', 'dark');
            window.location.search = params.toString();
        }
        }""",
    )'''
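    # The disabled block above would append ?__theme=dark to the URL on load to force the dark theme.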
    with gr.Row():
        with gr.Column():
            compare_face_input1 = gr.Image(label="Image1", type='filepath', height=480)
            gr.Examples(['examples/1.jpg', 'examples/2.jpg', 'examples/3.jpg', 'examples/4.jpg'], 
                        inputs=compare_face_input1)
            compare_face_button = gr.Button("Face Analysis & Verification", variant="primary")
        with gr.Column():
            compare_face_input2 = gr.Image(label="Image2", type='filepath', height=480)
            gr.Examples(['examples/5.jpg', 'examples/6.jpg', 'examples/7.jpg', 'examples/8.jpg'], 
                        inputs=compare_face_input2)
        with gr.Column():
            compare_face_output = gr.Gallery(label="Faces", height=250, columns=[2], rows=[1])
            compare_result_output = gr.JSON(label='Result')

    compare_face_button.click(
        compare_face,
        inputs=[compare_face_input1, compare_face_input2],
        outputs=[compare_result_output, compare_face_output],
    )

demo.launch(server_name="0.0.0.0", server_port=7860)
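
# Example of running this demo locally (assuming the file is saved as app.py, a hypothetical
# name, and that the examples/ images and a RapidAPI key are available):
#   API_KEY=<your RapidAPI key> python app.py
# The interface is then served at http://0.0.0.0:7860, as configured in demo.launch() above.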