Commit 19a05d0 · Update app.py
Parent(s): 1955e12
app.py CHANGED

@@ -10,6 +10,12 @@ from csv import writer
 from datasets import load_dataset
 from hfserver import HuggingFaceDatasetSaver, HuggingFaceDatasetJSONSaver
 
+ENVS = ['ShadowHand', 'ShadowHandCatchAbreast', 'ShadowHandOver', 'ShadowHandBlockStack', 'ShadowHandCatchUnderarm',
+    'ShadowHandCatchOver2Underarm', 'ShadowHandBottleCap', 'ShadowHandLiftUnderarm', 'ShadowHandTwoCatchUnderarm',
+    'ShadowHandDoorOpenInward', 'ShadowHandDoorOpenOutward', 'ShadowHandDoorCloseInward', 'ShadowHandDoorCloseOutward',
+    'ShadowHandPushBlock', 'ShadowHandKettle',
+    'ShadowHandScissors', 'ShadowHandPen', 'ShadowHandSwingCup', 'ShadowHandGraspAndPlace', 'ShadowHandSwitch']
+
 # download data from huggingface dataset
 # dataset = load_dataset("quantumiracle-git/robotinder-data")
 
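The new module-level ENVS constant hard-codes the ShadowHand task names that later populate the task-selection radio. A minimal sketch of a sanity check one could run at startup, assuming the one-folder-per-task layout under VIDEO_PATH that the rest of app.py relies on (check_env_folders is a hypothetical helper, not part of this commit):

import os

def check_env_folders(envs, data_folder):
    # Report any hard-coded task name with no matching clip folder, so a typo
    # in ENVS surfaces at startup instead of as an os.listdir crash later.
    return [env for env in envs if not os.path.isdir(os.path.join(data_folder, env))]

# e.g. missing = check_env_folders(ENVS, VIDEO_PATH)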
@@ -89,7 +95,7 @@ def get_huggingface_dataset():
     log_file = os.path.join(dataset_dir, "flag_data.csv")
     return repo, log_file
 
-def update(user_choice, left, right, data_folder=VIDEO_PATH, flag_to_huggingface=True):
+def update(user_choice, left, right, choose_env, data_folder=VIDEO_PATH, flag_to_huggingface=True):
     global last_left_video_path
     global last_right_video_path
     global last_infer_left_video_path
@@ -111,9 +117,11 @@ def update(user_choice, left, right, data_folder=VIDEO_PATH, flag_to_huggingface
     except:
         repo.git_pull(lfs=True) # sync with remote first
         repo.push_to_hub(commit_message=f"Flagged sample at {current_time}")
-
-
-
+    if choose_env == 'Random':
+        envs = parse_envs()
+        env_name = envs[random.randint(0, len(envs)-1)]
+    else:
+        env_name = choose_env
     # choose video
     videos = os.listdir(os.path.join(data_folder, env_name))
     video_files = []
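update() now receives the radio's choose_env value and either re-rolls a random task or keeps the user's pick before sampling the next video pair. A minimal sketch of just that branch, with pick_env as a hypothetical stand-in (the app inlines this logic and gets the candidate list from its parse_envs helper):

import random

def pick_env(choose_env, envs):
    # 'Random' re-rolls the task; any other radio value names the task directly.
    if choose_env == 'Random':
        return envs[random.randint(0, len(envs) - 1)]
    return choose_env

random.choice(envs) would be the more idiomatic spelling of the same re-roll.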
@@ -156,22 +164,26 @@ def build_interface(iter=3, data_folder=VIDEO_PATH):
 
     # build gradio interface
     with gr.Blocks() as demo:
-        gr.Markdown("Here is RoboTinder!")
+        gr.Markdown("Here is **RoboTinder**!")
         gr.Markdown("Select the best robot behaviour in your choice!")
+        # some initial values
+        envs = parse_envs()
+        env_name = envs[random.randint(0, len(envs)-1)] # random pick an env
+        with gr.Row():
+            str_env_name = gr.Markdown(f"{env_name}")
+
+        # choose video
+        videos = os.listdir(os.path.join(data_folder, env_name))
+        video_files = []
+        for f in videos:
+            if f.endswith(f'.{FORMAT}'):
+                video_files.append(os.path.join(data_folder, env_name, f))
+        # randomly choose two videos
+        selected_video_ids = np.random.choice(len(video_files), 2, replace=False)
+        left_video_path = video_files[selected_video_ids[0]]
+        right_video_path = video_files[selected_video_ids[1]]
+
         with gr.Row():
-            # some initial values
-            envs = parse_envs()
-            env_name = envs[random.randint(0, len(envs)-1)] # random pick an env
-            # choose video
-            videos = os.listdir(os.path.join(data_folder, env_name))
-            video_files = []
-            for f in videos:
-                if f.endswith(f'.{FORMAT}'):
-                    video_files.append(os.path.join(data_folder, env_name, f))
-            # randomly choose two videos
-            selected_video_ids = np.random.choice(len(video_files), 2, replace=False)
-            left_video_path = video_files[selected_video_ids[0]]
-            right_video_path = video_files[selected_video_ids[1]]
             if FORMAT == 'mp4':
                 # left = gr.PlayableVideo(left_video_path, label="left_video")
                 # right = gr.PlayableVideo(right_video_path, label="right_video")
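The main change in build_interface is structural: the task is now picked before any row is laid out, its name is displayed in its own gr.Row via the str_env_name Markdown component, and the clip sampling moves from inside the row up to Blocks scope. The sampling step itself is unchanged; a standalone sketch of it, assuming a folder of clips in the configured FORMAT (sample_pair is a hypothetical helper, not in the commit):

import os
import numpy as np

def sample_pair(env_dir, fmt='mp4'):
    # Gather clips of the expected format, then draw two distinct indices
    # (replace=False) so the left and right players never show the same file.
    files = [os.path.join(env_dir, f) for f in os.listdir(env_dir) if f.endswith(f'.{fmt}')]
    i, j = np.random.choice(len(files), 2, replace=False)
    return files[i], files[j]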
@@ -197,13 +209,14 @@ def build_interface(iter=3, data_folder=VIDEO_PATH):
 
         # btn1 = gr.Button("Replay")
         user_choice = gr.Radio(["Left", "Right", "Not Sure"], label="Which one is your favorite?")
+        choose_env = gr.Radio(["Random"]+ENVS, label="Choose the next task:")
         btn2 = gr.Button("Next")
 
         # This needs to be called at some point prior to the first call to callback.flag()
         callback.setup([user_choice, left, right], "flagged_data_points")
 
         # btn1.click(fn=replay, inputs=[left, right], outputs=[left, right])
-        btn2.click(fn=update, inputs=[user_choice, left, right], outputs=[left, right])
+        btn2.click(fn=update, inputs=[user_choice, left, right, choose_env], outputs=[left, right, str_env_name])
 
         # We can choose which components to flag -- in this case, we'll flag all of them
         btn2.click(lambda *args: callback.flag(args), [user_choice, left, right], None, preprocess=False)
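The Next button now feeds choose_env into update and routes a third output to the str_env_name Markdown, while the second .click on the same button still fires the flagging callback; Gradio runs every handler registered on a button once per press. A minimal self-contained sketch of that two-handler pattern with dummy components (step and the component names here are placeholders, not the app's real ones):

import gradio as gr

def step(choice, task):
    # Placeholder for update(): return new left/right clips and a task label.
    return 'left.mp4', 'right.mp4', f'Current task: {task}'

with gr.Blocks() as demo:
    label = gr.Markdown('Current task: ?')
    left, right = gr.Video(), gr.Video()
    choice = gr.Radio(['Left', 'Right', 'Not Sure'], label='Which one is your favorite?')
    task = gr.Radio(['Random', 'ShadowHand'], label='Choose the next task:')
    nxt = gr.Button('Next')
    nxt.click(fn=step, inputs=[choice, task], outputs=[left, right, label])
    nxt.click(lambda *args: print('flagged:', args), [choice], None)  # second handler, like the flag call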