import gradio as gr
import os
import random
import numpy as np
import gdown

from datasets import load_dataset
from hfserver import HuggingFaceDatasetSaver, HuggingFaceDatasetJSONSaver

# download data from huggingface dataset
# dataset = load_dataset("quantumiracle-git/robotinder-data")

# download data from google drive
# url = 'https://drive.google.com/drive/folders/10UmNM2YpvNSkdLMgYiIAxk5IbS4dUezw?usp=sharing'
# output = './'
# id = url.split('/')[-1]
# os.system(f"gdown --id {id} -O {output} --folder --no-cookies")

def video_identity(video):
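    """Return the input video unchanged (identity function kept for testing)."""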
    return video

def nan():
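    """Placeholder that simply returns None (e.g. to clear a component)."""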
    return None

# demo = gr.Interface(video_identity, 
#                     gr.Video(), 
#                     "playable_video", 
#                     examples=[
#                         os.path.join(os.path.dirname(__file__), 
#                                      "videos/rl-video-episode-0.mp4")], 
#                     cache_examples=True)

FORMAT = ['mp4', 'gif'][1]  # video format used throughout: index 0 -> mp4, index 1 -> gif

def update(data_folder='videos'):
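    """Pick a random environment under data_folder and return the paths of
    two distinct videos from it as (left, right)."""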
    # data_folder='videos'
    envs = parse_envs()   
    env_name = random.choice(envs)  # randomly pick an environment
    # choose video
    videos = os.listdir(os.path.join(data_folder, env_name))
    video_files = []
    for f in videos:
        if f.endswith(f'.{FORMAT}'):
            video_files.append(os.path.join(data_folder, env_name, f))
    # choose two videos
    selected_video_ids = np.random.choice(len(video_files), 2, replace=False)
    left = video_files[selected_video_ids[0]]
    right = video_files[selected_video_ids[1]]
    print(env_name, left, right)
    return left, right

# def update(left, right):
#     if FORMAT == 'mp4':
#         left = os.path.join(os.path.dirname(__file__), 
#                              "videos/rl-video-episode-2.mp4")
#         right = os.path.join(os.path.dirname(__file__), 
#                              "videos/rl-video-episode-3.mp4")
#     else:
#         left = os.path.join(os.path.dirname(__file__), 
#                              "videos/rl-video-episode-2.gif")
#         right = os.path.join(os.path.dirname(__file__), 
#                              "videos/rl-video-episode-3.gif")  
#     print(left, right)     
#     return left, right

def replay(left, right):  
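    """Return the current pair of videos unchanged so Gradio re-renders (replays) them."""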
    return left, right

def parse_envs(folder='./videos'):
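    """List the available environment names, i.e. the sub-directories of folder."""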
    envs = []
    for f in os.listdir(folder):
        if os.path.isdir(os.path.join(folder, f)):
            envs.append(f)
    return envs

def build_interface(iter=3, data_folder='./videos'):
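    """Build the Gradio Blocks UI: two videos side by side, a preference radio,
    and Replay/Next buttons; each choice is flagged to a HuggingFace dataset."""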
    # Read the HuggingFace token from the environment; do not hard-code or print secrets
    HF_TOKEN = os.getenv('HF_TOKEN')
    # hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-robotinder-demo")  # HuggingFace logger instead of local one: https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py
    hf_writer = HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-robotinder-demo")
    # callback = gr.CSVLogger()
    callback = hf_writer
    
    # build gradio interface
    with gr.Blocks() as demo:
        gr.Markdown("Here is RoboTinder!")
        gr.Markdown("Select the best robot behaviour in your choice!")
        with gr.Row():
            # some initial videos
            if FORMAT == 'mp4':
                left_video_path = os.path.join(os.path.dirname(__file__), 
                                    "videos/rl-video-episode-0.mp4")
                right_video_path = os.path.join(os.path.dirname(__file__), 
                                    "videos/rl-video-episode-1.mp4")
                left = gr.PlayableVideo(left_video_path, label="left_video")
                right = gr.PlayableVideo(right_video_path, label="right_video")
            else:
                left_video_path = os.path.join(os.path.dirname(__file__), 
                                    "videos/rl-video-episode-0.gif")
                right_video_path = os.path.join(os.path.dirname(__file__), 
                                    "videos/rl-video-episode-1.gif")  
                left = gr.Image(left_video_path, shape=(1024, 768), label="left_video")
                # right = gr.Image(right_video_path).style(height=768, width=1024)
                right = gr.Image(right_video_path, label="right_video")

        btn1 = gr.Button("Replay")
        user_choice = gr.Radio(["Left", "Right", "Not Sure"], label="Which one is your favorite?")
        btn2 = gr.Button("Next")

        # This needs to be called at some point prior to the first call to callback.flag()
        callback.setup([user_choice, left, right], "flagged_data_points")
        
        btn1.click(fn=replay, inputs=[left, right], outputs=[left, right])
        btn2.click(fn=update, inputs=None, outputs=[left, right])

        # We can choose which components to flag -- in this case, we'll flag all of them
        btn2.click(lambda *args: callback.flag(args), [user_choice, left, right], None, preprocess=False)

    return demo

if __name__ == "__main__":
    demo = build_interface()
    # demo.launch(share=True)
    demo.launch(share=False)