Upload app.py
layout changed
app.py (CHANGED)
@@ -1,446 +1,459 @@
import os
import io
import math
import pickle

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from PIL import Image

# Access tokens for the private Hugging Face Spaces that serve each model
# combination; every one of these must be set as an environment secret.
hf_token = {
    "multicastcustom": os.environ["HF_MulTiCastCustom_Token"],
    "clipqwentimer": os.environ["HF_CLIPQwenTimer_Token"],
    "clipllamatimer": os.environ["HF_CLIPLLaMATimer_Token"],
    "blipqwentimer": os.environ["HF_BLIPQwenTimer_Token"],
    "blipllamatimer": os.environ["HF_BLIPLLaMATimer_Token"],
    "clipqwenchronos": os.environ["HF_CLIPQwenChronos_Token"],
    "clipllamachronos": os.environ["HF_CLIPLLaMAChronos_Token"],
    "blipqwenchronos": os.environ["HF_BLIPQwenChronos_Token"],
    "blipllamachronos": os.environ["HF_BLIPLLaMAChronos_Token"]
}

# Pre-computed example inputs and forecast targets, keyed by dataset name.
with open('example/inputs.pkl', 'rb') as f:
    inputs = pickle.load(f)

with open('example/targets.pkl', 'rb') as f:
    targets = pickle.load(f)

descriptions = {
    "NN5 Daily": "Daily cash withdrawal volumes from automated teller machines (ATMs) in the United Kingdom, originally used in the NN5 forecasting competition.",
    "Australian Electricity": "Half-hourly electricity demand data across five Australian states.",
    "CIF 2016": "Monthly banking time series used in the CIF 2016 forecasting challenge, reflecting customer financial behaviours.",
    "Tourism Monthly": "Monthly tourism-related time series used in the Kaggle Tourism forecasting competition, covering various regions and visitor types.",
    "Custom": "Custom Dataset"
}

# Number of time steps used as forecasting context for each dataset.
context_length = {
    "NN5 Daily": 56,
    "Australian Electricity": 48,
    "CIF 2016": 12,
    "Tourism Monthly": 24
}

def selected_dataset(dataset):
    if dataset == "Custom":
        gallery_items = []
    else:
        gallery_items = [(Image.open(f'example/img/{dataset.replace(" ", "_")}/{i}.png').convert('RGB'), str(i+1)) for i in range(3)]

    gallery_items.append((Image.open('example/img/custom.png').convert('RGB'), 'Custom Input'))

    return gr.Gallery(gallery_items, interactive=False, height="350px", object_fit="contain", preview=True), gr.Textbox(value=descriptions[dataset], label="Dataset Description", interactive=False)

def selected_example(gallery, evt: gr.SelectData):
    # The last gallery tile is always the "Custom Input" placeholder.
    if evt.index == len(gallery) - 1:
        return -1
    else:
        return evt.index

def update_guide_markdown(dataset, example_index):
    if example_index is None:
        return gr.Markdown(visible=False), gr.File(visible=False)

    elif dataset == "Custom":
        return gr.Markdown(visible=False), gr.File(visible=False)

    elif example_index == -1:  # Custom Input
        return (
            gr.Markdown(
                value=f"To use custom input, please follow the sample CSV file below. Do not change the column names. Only the first {context_length[dataset]} values will be used as the input time series.",
                visible=True
            ),
            gr.File(value="example/sample.csv", label="Sample CSV File", visible=True)
        )
    else:
        df = inputs[dataset][example_index]
        col_min = df.min()  # avoid shadowing the built-in min/max
        col_max = df.max()
        min_timestamp = pd.Series(col_min["Timestamp"]).to_string(index=False)
        max_timestamp = pd.Series(col_max["Timestamp"]).to_string(index=False)
        min_value = col_min["Value"]
        max_value = col_max["Value"]
        return (
            gr.Markdown(
                value=f"This time series spans {min_timestamp} to {max_timestamp}, with a minimum value of {min_value:.4f} and a maximum value of {max_value:.4f}.",
                visible=True
            ),
            gr.File(visible=False)
        )

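# The custom-input CSV is expected to carry the columns this app reads
# ("Timestamp", "Value", and optionally "Ground Truth"); the rows below are a
# hypothetical illustration, and example/sample.csv is the authoritative template:
#   Timestamp,Value,Ground Truth
#   2016-01-01,101.2,98.7
#   ...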
def update_time_series_dataframe(dataset, example_index):
    if example_index is None:
        return None, None
    elif example_index == -1:  # Custom Input
        return gr.File(label="Time Series CSV File", file_types=[".csv"], visible=True), gr.Dataframe(value=None, visible=False)
    elif dataset == "Custom":
        return None, None
    else:
        df = inputs[dataset][example_index]
        return gr.File(value=None, visible=False), gr.Dataframe(value=df, label="Time Series Input", interactive=False, visible=True)

def load_csv(example_index, file):
    if example_index == -1:
        if file is not None:
            return gr.Dataframe(value=pd.read_csv(file.name), visible=True)
        else:
            return gr.Dataframe(value=None, visible=False)
    else:
        return gr.skip()

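# Attention rollout (after Abnar & Zuidema, 2020): average each layer's
# attention over heads, add the identity to account for residual connections,
# row-normalize, and multiply the per-layer matrices together to trace how
# attention propagates through the chosen span of encoder layers.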
def vision_attention_rollout(attentions, start_layer=0, end_layer=12):
    seq_len = attentions.shape[-1]
    result = np.eye(seq_len)

    for attn in attentions[start_layer:end_layer]:
        attn_heads = attn.mean(axis=0)           # average over attention heads
        attn_aug = attn_heads + np.eye(seq_len)  # identity models the residual path
        attn_aug = attn_aug / attn_aug.sum(axis=-1, keepdims=True)
        result = attn_aug @ result

    # Row 0 is assumed to be the CLS token; keep its attention over the last
    # 49 positions, i.e. a 7x7 grid of image patches.
    return result[0, -49:]

def plot_vision_heatmap(image, rollout_attention, alpha=0.5, cmap='jet'):
    num_patches = rollout_attention.shape[0]
    grid_size = int(np.sqrt(num_patches))

    attn_grid = rollout_attention.reshape(grid_size, grid_size)

    # Upsample the patch-level attention grid to the full image resolution.
    H, W = image.shape[:2]
    attn_map = cv2.resize(attn_grid, (W, H), interpolation=cv2.INTER_CUBIC)
    attn_map = attn_map / attn_map.max()

    plt.figure(figsize=(6, 6))
    plt.imshow(image)
    plt.imshow(attn_map, cmap=cmap, alpha=alpha)
    plt.axis('off')
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plot_img = Image.open(buf).convert('RGB')
    plt.close()  # close (not just clear) the figure so repeated calls do not leak figures

    return plot_img

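# The time-series branch is summarized more simply than the vision branch:
# sum the head-averaged attention of the selected layers, then keep only the
# positions that cover the input context (one position per 16-step patch).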
def time_series_attention_sum(attentions, context_length, start_layer=0, end_layer=12):
    seq_len = attentions.shape[-1]
    result = np.zeros(seq_len)
    for attn in attentions[start_layer:end_layer]:
        attn_heads = attn.mean(0).squeeze()  # average over attention heads
        result += attn_heads
    att_len = math.ceil(context_length / 16)
    return result[-att_len:]

def plot_time_series_heatmap(context, attention, time_steps):
    plt.figure(figsize=(8, 4))
    plt.plot(context, color="black", linewidth=2)
    attention = attention / attention.max()
    cmap = plt.get_cmap("coolwarm")
    for i, v in enumerate(attention):
        # Shade each 16-step patch; the attention weight drives both the
        # colormap position and the opacity.
        start = i * 16
        end = min((i + 1) * 16, time_steps - 1)
        color = cmap(v)[:-1] + (v,)
        plt.axvspan(start, end, color=color)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plot_img = Image.open(buf).convert('RGB')
    plt.close()  # close the figure so repeated calls do not leak figures

    return plot_img

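# Main inference entry point: validate the UI state, z-normalize the input
# series, POST it to the Hugging Face Space serving the selected model
# combination, then build the forecast plot and attention heatmaps from the
# response.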
def predict(dataset, text, example_index, file, vision_encoder, text_encoder, tsfm, model_id):

    if tsfm == "Custom" and model_id == "":
        return (
            gr.Markdown(
                value="Please enter the Hugging Face model repo id.",
                visible=True
            ),
            None,
            None,
            None,
            None
        )

    if (dataset is None or example_index is None) or (example_index == -1 and file is None):
        return (
            gr.Markdown(
                value="Please select an example or provide a CSV file.",
                visible=True
            ),
            None,
            None,
            None,
            None
        )
    elif (vision_encoder is None or text_encoder is None or tsfm is None):
        return (
            gr.Markdown(
                value="Please select a pretrained model for UniCast.",
                visible=True
            ),
            None,
            None,
            None,
            None
        )

    if example_index == -1:
        df = pd.read_csv(file.name)
        df = df.iloc[:context_length[dataset]]
    else:
        df = inputs[dataset][example_index]
    time_series = np.array(df["Value"])
    mean = np.mean(time_series)
    std = np.std(time_series)
    time_series_normalized = (time_series - mean) / std

    text = None if text == '' else text

    unicast_model = f"{vision_encoder.lower()}{text_encoder.lower()}{tsfm.lower()}"

    if tsfm == "Custom":
        url = "https://adnlp-multicast-custom.hf.space/predict"
        headers = {"Authorization": f"Bearer {hf_token['multicastcustom']}"}
        payload = {
            "repo_id": model_id,
            "dataset": dataset,
            "context": time_series_normalized.tolist(),
            "text": text
        }
    else:
        url = f"https://adnlp-unicast-{unicast_model}.hf.space/predict"
        headers = {"Authorization": f"Bearer {hf_token[unicast_model]}"}
        payload = {
            "dataset": dataset,
            "context": time_series_normalized.tolist(),
            "text": text
        }

    res = requests.post(url, headers=headers, json=payload)
    res_json = res.json()

    # Forecast Plot
    prediction = np.array(res_json['prediction'])
    cl = context_length[dataset]
    prediction = prediction[:cl]
    prediction = prediction * std + mean  # undo the z-normalization

    # Extend the most common input spacing to build the forecast timestamps.
    input_dates_series = pd.to_datetime(df["Timestamp"])
    time_diff = input_dates_series.diff().mode()[0]
    start_time = input_dates_series.iloc[-1] + time_diff
    forecast_dates_series = pd.date_range(start=start_time, periods=len(input_dates_series), freq=time_diff)

    plt.close()
    with plt.style.context("seaborn-v0_8"):
        fig, ax = plt.subplots(figsize=(10, 4))
        ax.plot(input_dates_series, time_series, color="black", alpha=0.7, linewidth=3, label='Input')
        ax.plot(forecast_dates_series, prediction, color='C2', alpha=0.7, linewidth=3, label='Forecast')
        if example_index == -1:  # Custom Input
            true = df["Ground Truth"]
        else:
            true = targets[dataset][example_index].iloc[:, -1]
        if len(true) == context_length[dataset]:
            ax.plot(forecast_dates_series, true, color='C0', alpha=0.7, linewidth=3, label='Ground Truth')
        ax.legend()

    # Vision Heatmap: re-render the normalized input as a 384x384 line-plot
    # image so the attention heatmaps have a matching backdrop.
    plt.figure(figsize=(384/100, 384/100), dpi=100)
    plt.plot(time_series_normalized, color="black", linestyle="-", linewidth=1, marker="*", markersize=1)
    plt.xticks([])
    plt.yticks([])
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    context_image = np.array(Image.open(buf).convert('RGB'))

    vision_attentions = np.array(res_json['vision_attentions'])
    vision_heatmap_gallery_items = []
    for i in range(0, 12, 3):  # one rollout heatmap per block of three encoder layers
        vis_attn = vision_attention_rollout(vision_attentions, i, i + 3)
        vision_heatmap = plot_vision_heatmap(context_image, vis_attn)
        vision_heatmap_gallery_items.append((vision_heatmap, f"Heatmap from Layer{i}:{i+3}"))

    # Time Series Heatmap (only the Chronos backbone returns these attentions)
    if tsfm == "Chronos":
        time_series_attentions = np.array(res_json['time_series_attentions'])
        time_series_heatmap_gallery_items = []
        for i in range(0, 12, 3):
            ts_attn = time_series_attention_sum(time_series_attentions, cl, i, i + 3)
            time_series_heatmap = plot_time_series_heatmap(time_series, ts_attn, cl)
            time_series_heatmap_gallery_items.append((time_series_heatmap, f"Heatmap from Layer{i}:{i+3}"))
    else:
        time_series_heatmap_gallery_items = None

    return (
        gr.Markdown(visible=False),
        fig,
        gr.Markdown("# Attention Map", visible=True),
        gr.Gallery(vision_heatmap_gallery_items, interactive=False, height="350px", object_fit="contain", visible=True),
        gr.Gallery(time_series_heatmap_gallery_items, interactive=False, height="350px", object_fit="contain", visible=bool(time_series_heatmap_gallery_items))
    )

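# After a successful run on a custom CSV, render the uploaded series as a new
# gallery tile and register it as a reusable example for the dataset.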
def add_example_gallery(dataset, gallery, example_index, file):
    if example_index == -1 and file:
        df = pd.read_csv(file.name)
        custom_input = df[["Timestamp", "Value"]]
        custom_target = df[["Timestamp", "Ground Truth"]]

        plt.style.use("seaborn-v0_8")
        ax = custom_input.plot(x="Timestamp", color="black", linewidth=3, legend=False, x_compat=True)
        ax.set_xlabel("")
        # ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d %H:%M"))
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        plot_img = Image.open(buf).convert('RGB')
        plt.close()
        # Insert before the trailing "Custom Input" tile; the three built-in
        # examples come first, so custom tiles are numbered from 1.
        gallery.insert(-1, (plot_img, f"Custom {len(gallery)-3}"))

        inputs[dataset].append(custom_input)
        targets[dataset].append(custom_target)
    return gallery

def on_model_selection(selected):
    # The HF model id textbox is only relevant for the "Custom" backbone.
    return gr.update(visible=selected == "Custom")

custom_css = """
.two-col { display:flex; align-items:flex-end; gap: 16px; }
.right-col { display:flex; flex-direction:column; } /* optional */
.push-down { margin-top:auto; } /* optional */
.footer-fixed{
    position: fixed; left:0; right:0; bottom:0;
    font-size: 16px;
    padding: 10px 16px; border-top: 1px solid var(--border-color);
    background: var(--background-fill-primary); z-index: 1000;
    display: flex; justify-content: flex-end; align-items: center; /* right align */
}
.blue-btn {
    background-color: #024397 !important;
    color: white !important;
    border-radius: 8px !important;
    border: none !important;
    padding: 8px 16px !important;
    font-weight: 600;
}
.blue-btn:hover {
    background-color: #0056b3 !important; /* darker blue on hover */
}
.app-description{
    font-size: 16px;
}
"""

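# ---- Gradio UI ----
# Layout: logo and app description on top, dataset/example pickers and data
# viewer in the middle, model selection on the right, prediction and attention
# panels at the bottom, plus a fixed footer.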
with gr.Blocks(css=custom_css) as demo:

    gr.HTML("""
    <style>
    #logo {
        display: flex;
        justify-content: flex-start;
    }
    .gallery-container .grid-container {
        display: flex !important;
    }
    </style>
    """)
    gr.Image(
        value="logo.png",
        show_label=False,
        show_download_button=False,
        show_fullscreen_button=False,
        show_share_button=False,
        interactive=False,
        height=128,
        container=False,
        elem_id="logo"
    )
    with gr.Row(elem_classes=["two-col"]):
        with gr.Column(scale=2):
            gr.Markdown("<b>MulTiCast</b>, based on <a href='https://github.com/adlnlp/unicast'><b>UniCast</b></a>, is designed as a web-based system that allows users to perform multimodal time-series forecasting without technical setup. The system integrates a numerical time-series forecasting backbone with vision and text encoders. It exposes these capabilities through a lightweight but fully interactive web interface hosted on Hugging Face Spaces. Its design focuses on lowering the barrier to entry while ensuring interpretability through attention-based visualizations. The project and demo are sponsored by <a href='https://research.google/'>Google Research</a>.", elem_classes=["app-description"])

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("# Choose Dataset")
            dataset_choices = ["NN5 Daily", "Australian Electricity", "Custom"]
            dataset_dropdown = gr.Dropdown(dataset_choices, value=None, label="Datasets", interactive=True)
            dataset_description_textbox = gr.Textbox(label="Dataset Description", interactive=False)

        with gr.Column(scale=3):
            gr.Markdown("# Data Selection")
            example_gallery = gr.Gallery(
                None,
                interactive=False
            )
            example_index = gr.State(value=None)
            example_gallery.select(selected_example, inputs=example_gallery, outputs=example_index)

            guide_text_markdown = gr.Markdown(visible=False)
            sample_csv_file = gr.File(visible=False)

    with gr.Row(elem_classes=["two-col"]):
        with gr.Column(scale=3):
            gr.Markdown("# Data Viewer")
            time_series_file = gr.File(value=None, visible=False)
            time_series_dataframe = gr.Dataframe(visible=False)

            dataset_dropdown.change(selected_dataset, inputs=dataset_dropdown, outputs=[example_gallery, dataset_description_textbox])
            dataset_dropdown.change(update_guide_markdown, inputs=[dataset_dropdown, example_index], outputs=[guide_text_markdown, sample_csv_file])
            dataset_dropdown.change(update_time_series_dataframe, inputs=[dataset_dropdown, example_index], outputs=[time_series_file, time_series_dataframe])
            example_index.change(update_guide_markdown, inputs=[dataset_dropdown, example_index], outputs=[guide_text_markdown, sample_csv_file])
            example_index.change(update_time_series_dataframe, inputs=[dataset_dropdown, example_index], outputs=[time_series_file, time_series_dataframe])

            time_series_file.change(load_csv, inputs=[example_index, time_series_file], outputs=time_series_dataframe)

        with gr.Column(scale=1):

            gr.Markdown("# Model Selection")
            model_choices = ["Timer", "Chronos", "Custom"]
            tsfm_radio = gr.Radio(model_choices, label="Time Series Foundation Model")
            md_choices = gr.State(model_choices)

            model_id_box = gr.Textbox(placeholder="Type and Enter…", label="HF Model ID", interactive=True, visible=False)
            # model_token_box = gr.Textbox(placeholder="Type and Enter…", label="HF Model Token", interactive=True, visible=False)

            vision_encoder_radio = gr.Radio(["CLIP", "BLIP"], label="Vision Encoder")
            text_encoder_radio = gr.Radio(["Qwen", "LLaMA"], label="Text Encoder")
            warning_markdown = gr.Markdown(visible=False)
            btn = gr.Button("Run", elem_classes="blue-btn")

            tsfm_radio.change(on_model_selection, [tsfm_radio], model_id_box)
            # tsfm_radio.change(on_model_selection, [tsfm_radio], model_token_box)

    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("# Prediction")
            forecast_plot = gr.Plot(label="Forecast", format="png")
            heatmap_header_html = gr.Markdown("# Attention Map", visible=False)
            vision_heatmap_gallery = gr.Gallery(visible=False)
            time_series_heatmap_gallery = gr.Gallery(visible=False)

    btn.click(predict, inputs=[dataset_dropdown, dataset_description_textbox, example_index, time_series_file, vision_encoder_radio, text_encoder_radio, tsfm_radio, model_id_box], outputs=[warning_markdown, forecast_plot, heatmap_header_html, vision_heatmap_gallery, time_series_heatmap_gallery])
    btn.click(add_example_gallery, inputs=[dataset_dropdown, example_gallery, example_index, time_series_file], outputs=[example_gallery])

    gr.HTML("<small>This work is sponsored by Google Research</small>", elem_classes=["footer-fixed"])


if __name__ == "__main__":
    demo.launch(ssr_mode=False)