quantumiracle-git committed
Commit 83e5a4c · 1 Parent(s): 027bc4a

Update app.py

Files changed (1):
  1. app.py +39 -6
app.py CHANGED
@@ -3,6 +3,7 @@ import os
 import random
 import numpy as np
 import gdown
+import base64
 from time import gmtime, strftime
 from csv import writer
 
@@ -39,13 +40,26 @@ else: # local data
 VIDEO_PATH = 'robotinder-data'
 
 
+def inference(video_path):
+    with open(video_path, "rb") as f:
+        data = f.read()
+    b64 = base64.b64encode(data).decode()
+    html = (
+        f"""
+        <video controls autoplay muted loop>
+        <source src="data:video/mp4;base64,{b64}" type="video/mp4">
+        </video>
+        """
+    )
+    return html
+
 def video_identity(video):
     return video
 
 def nan():
     return None
 
-FORMAT = ['mp4', 'gif'][1]
+FORMAT = ['mp4', 'gif'][0]
 
 def get_huggingface_dataset():
     try:
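Note on the new inference() helper above: instead of letting Gradio serve the clip through a video component, it inlines the whole mp4 as a base64 data URI inside an HTML5 <video> tag; autoplay is paired with muted because most browsers only allow autoplay for muted media. A minimal standalone sketch of the same technique (the clip.mp4 path is a placeholder, not from this repo):

import base64

def video_to_html(video_path):
    # read the file and inline it as a data URI, so no file route is needed
    with open(video_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    # autoplay + muted + loop mirrors the tag used in inference()
    return (
        '<video controls autoplay muted loop>'
        f'<source src="data:video/mp4;base64,{b64}" type="video/mp4">'
        '</video>'
    )

print(video_to_html("clip.mp4")[:80])  # placeholder file; prints the start of the markup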
@@ -78,7 +92,9 @@ def get_huggingface_dataset():
 def update(user_choice, left, right, data_folder=VIDEO_PATH, flag_to_huggingface=True):
     global last_left_video_path
     global last_right_video_path
-
+    global last_infer_left_video_path
+    global last_infer_right_video_path
+
     if flag_to_huggingface: # log
         env_name = str(last_left_video_path).split('/')[1] # 'robotinder-data/ENV_NAME/'
         current_time = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
@@ -110,7 +126,10 @@ def update(user_choice, left, right, data_folder=VIDEO_PATH, flag_to_huggingface
     right = video_files[selected_video_ids[1]]
     last_left_video_path = left
     last_right_video_path = right
-    return left, right
+    last_infer_left_video_path = inference(left)
+    last_infer_right_video_path = inference(right)
+
+    return last_infer_left_video_path, last_infer_right_video_path
 
 def replay(left, right):
     return left, right
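update() now hands back rendered HTML strings rather than file paths, so whatever components are bound to its outputs must render markup (gr.HTML) instead of playing a file (gr.PlayableVideo). The event wiring itself is outside this hunk; a hedged sketch of how such a binding typically looks in Gradio Blocks (names below are illustrative, not taken from app.py):

import gradio as gr

def next_pair():
    # stand-in for update(): returns two HTML snippets, e.g. produced by inference()
    return "<b>left video markup goes here</b>", "<b>right video markup goes here</b>"

with gr.Blocks() as demo:
    left = gr.HTML(label="left_video")
    right = gr.HTML(label="right_video")
    btn = gr.Button("Next")
    # outputs are HTML components because next_pair() returns markup, not paths
    btn.click(fn=next_pair, inputs=[], outputs=[left, right])

# demo.launch()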
@@ -123,6 +142,10 @@ def parse_envs(folder=VIDEO_PATH):
     return envs
 
 def build_interface(iter=3, data_folder=VIDEO_PATH):
+    import sys
+    import csv
+    csv.field_size_limit(sys.maxsize)
+
     HF_TOKEN = os.getenv('HF_TOKEN')
     print(HF_TOKEN)
     HF_TOKEN = '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' # my HF token
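The csv.field_size_limit(sys.maxsize) call is most likely needed because the flagged values are now full base64-encoded videos: Python's csv module rejects any field longer than 131072 characters (2**17) by default, which even a short mp4 exceeds once inlined. A quick check of the default and the override:

import csv
import sys

print(csv.field_size_limit())      # 131072 by default
csv.field_size_limit(sys.maxsize)  # same call as in build_interface()
print(csv.field_size_limit())      # now sys.maxsize

On platforms where the underlying C long is 32-bit this exact call can raise OverflowError, in which case passing a large fixed integer (e.g. 2**31 - 1) works instead.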
@@ -130,7 +153,7 @@ def build_interface(iter=3, data_folder=VIDEO_PATH):
     hf_writer = HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-robotinder-demo")
     # callback = gr.CSVLogger()
     callback = hf_writer
-
+
     # build gradio interface
     with gr.Blocks() as demo:
         gr.Markdown("Here is RoboTinder!")
@@ -150,8 +173,13 @@ def build_interface(iter=3, data_folder=VIDEO_PATH):
         left_video_path = video_files[selected_video_ids[0]]
         right_video_path = video_files[selected_video_ids[1]]
         if FORMAT == 'mp4':
-            left = gr.PlayableVideo(left_video_path, label="left_video")
-            right = gr.PlayableVideo(right_video_path, label="right_video")
+            # left = gr.PlayableVideo(left_video_path, label="left_video")
+            # right = gr.PlayableVideo(right_video_path, label="right_video")
+
+            infer_left_video_path = inference(left_video_path)
+            infer_right_video_path = inference(right_video_path)
+            right = gr.HTML(infer_right_video_path, label="right_video")
+            left = gr.HTML(infer_left_video_path, label="left_video")
         else:
             left = gr.Image(left_video_path, shape=(1024, 768), label="left_video")
             # right = gr.Image(right_video_path).style(height=768, width=1024)
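Replacing gr.PlayableVideo with gr.HTML also changes how the clips reach the browser: nothing is streamed or served from disk, every page embeds the full clip inflated by base64 (4 output bytes per 3 input bytes). A small sketch to gauge that overhead for a given file (the path is a placeholder):

import base64
import os

path = "clip.mp4"  # placeholder file
raw = os.path.getsize(path)
inlined = len(base64.b64encode(open(path, "rb").read()))
print(f"{raw} bytes on disk -> {inlined} bytes inlined ({inlined / raw:.2f}x)")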
@@ -162,6 +190,11 @@ def build_interface(iter=3, data_folder=VIDEO_PATH):
         global last_right_video_path
         last_right_video_path = right_video_path
 
+        global last_infer_left_video_path
+        last_infer_left_video_path = infer_left_video_path
+        global last_infer_right_video_path
+        last_infer_right_video_path = infer_right_video_path
+
         btn1 = gr.Button("Replay")
         user_choice = gr.Radio(["Left", "Right", "Not Sure"], label="Which one is your favorite?")
         btn2 = gr.Button("Next")