Benjamin Ernhofer committed: Update README.md

README.md CHANGED
@@ -74,7 +74,402 @@ Key Features:
- Apple CarPlay: 13
- Google Android Auto: 7

## Usage

The corresponding model, [ELAM](https://huggingface.co/sparks-solutions/ELAM-7B), is also available on Hugging Face.

<details>
<summary>Setup Environment for ELAM-7B</summary>

```
conda create -n elam python=3.10 -y
conda activate elam
pip install datasets==3.5.0 einops==0.8.1 torchvision==0.20.1 accelerate==1.6.0
pip install transformers==4.48.2
```
</details>

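If you only want to peek at the dataset before running the full evaluation, the snippet below is a minimal sketch; the field names (`image`, `box`, `class`, `language`, ...) are the ones used by the inference script further down.

```python
# Minimal sketch: load AutomotiveUI-Bench-4K and inspect a single sample.
# Field names follow the inference script below; run after installing the environment above.
from datasets import load_dataset

dataset = load_dataset("sparks-solutions/AutomotiveUI-Bench-4K")["test"]
print(len(dataset))           # number of annotated UI samples

sample = dataset[0]
print(sample["class"])        # "Test Action" or "Expected Result"
print(sample["language"])     # "EN" or "DE"
print(sample["box"])          # ground-truth box(es) for the referenced UI element
print(sample["image"].size)   # PIL image size as (width, height)
```
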
<details>
<summary>Dataloading and Inference with ELAM-7B</summary>

```python
# Run inference on the AutomotiveUI-Bench-4K dataset on a local GPU
# Outputs will be written to a JSONL file
import json
import os
import time

import torch
from datasets import Dataset, load_dataset
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig


def preprocess_prompt_elam(user_request: str, label_class: str) -> str:
    """Apply ELAM prompt template depending on class."""
    if label_class == "Expected Result":
        return f"Evaluate this statement about the image:\n'{user_request}'\nThink step by step, conclude whether the evaluation is 'PASSED' or 'FAILED' and point to the UI element that corresponds to this evaluation."
    elif label_class == "Test Action":
        return f"Identify and point to the UI element that corresponds to this test action:\n{user_request}"
    else:
        raise ValueError(f"Unknown label class: {label_class}")


def append_to_jsonl_file(data: dict, target_path: str) -> None:
    assert str(target_path).endswith(".jsonl")
    with open(target_path, "a", encoding="utf-8") as file:
        file.write(f"{json.dumps(data, ensure_ascii=False)}\n")


def run_inference(dataset: Dataset, model: AutoModelForCausalLM, processor: AutoProcessor):
    # Define output dir and file
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    DEBUG_DIR = os.path.join("eval_output", timestamp)
    model_outputs_path = os.path.join(DEBUG_DIR, "model_outputs.jsonl")

    print(f"Writing data to: {model_outputs_path}")
    for sample_id, sample in enumerate(tqdm(dataset, desc="Processing")):
        image = sample["image"]

        gt_box = sample["box"][0]
        label_class = sample["class"]

        # Read ground-truth utterance (and conclusion for expected results)
        utterance = None
        gt_status = None
        if "Expected Result" == label_class:
            utterance = sample["expectation"]
            gt_status = sample["conclusion"].upper()
        elif "Test Action" == label_class:
            utterance = sample["test_action"]
        else:
            raise ValueError(f"Did not find valid utterance for image #{sample_id}.")
        assert utterance

        # Apply prompt template
        rephrased_utterance = preprocess_prompt_elam(utterance, label_class)

        # Process the image and text
        inputs = processor.process(
            images=[image],
            text=rephrased_utterance,
        )

        # Move inputs to the correct device and make a batch of size 1, cast to bfloat16
        inputs_bfloat16 = {}
        for k, v in inputs.items():
            if v.dtype == torch.float32:
                inputs_bfloat16[k] = v.to(model.device).to(torch.bfloat16).unsqueeze(0)
            else:
                inputs_bfloat16[k] = v.to(model.device).unsqueeze(0)

        inputs = inputs_bfloat16  # Replace original inputs with the correctly typed inputs

        # Generate output
        output = model.generate_from_batch(
            inputs, GenerationConfig(max_new_tokens=2048, stop_strings="<|endoftext|>"), tokenizer=processor.tokenizer
        )

        # Only get generated tokens; decode them to text
        generated_tokens = output[0, inputs["input_ids"].size(1) :]
        response = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)

        # Ensure the output directory exists
        os.makedirs(DEBUG_DIR, exist_ok=True)

        # Append line to JSONL
        model_output_line = {
            "sample_id": sample_id,
            "input": rephrased_utterance,
            "output": response,
            "image_size": image.size,
            "gt_class": label_class,
            "gt_box": gt_box,
            "gt_status": gt_status,
            "language": sample["language"],
        }
        append_to_jsonl_file(model_output_line, target_path=model_outputs_path)


if __name__ == "__main__":
    # Set dataset
    dataset = load_dataset("sparks-solutions/AutomotiveUI-Bench-4K")["test"]

    # Load the processor
    model_name = "sparks-solutions/ELAM-7B"
    processor = AutoProcessor.from_pretrained(
        model_name, trust_remote_code=True, torch_dtype="bfloat16", device_map="auto"
    )

    # Load the model
    model = AutoModelForCausalLM.from_pretrained(
        model_name, trust_remote_code=True, torch_dtype="bfloat16", device_map="auto"
    )
    run_inference(dataset=dataset, processor=processor, model=model)
```
</details>

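ELAM answers with Molmo-style point markup (the parsing regex in the evaluation script below expects `<point x="..." y="..."`), and the coordinates are divided by 100 during evaluation, i.e. they are treated as percentages of the image width and height. The following is a minimal sketch for turning such a point into pixel coordinates; the example response string is illustrative, not actual model output.

```python
# Sketch: convert a Molmo-style point from an ELAM response into pixel coordinates.
# The regex mirrors postprocess_response_elam in the evaluation script below;
# the response string here is made up for illustration.
import re

response = 'The statement is met. PASSED <point x="62.5" y="18.0" alt="volume slider">volume slider</point>'
match = re.search(r'<point x="(?P<x>\d+\.\d+)" y="(?P<y>\d+\.\d+)"', response)

image_width, image_height = 1920, 1080  # example display resolution
if match:
    # Point coordinates are percentages of the image size (0-100).
    x_px = float(match.group("x")) / 100 * image_width
    y_px = float(match.group("y")) / 100 * image_height
    print(x_px, y_px)  # 1200.0 194.4
```
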
<details>
<summary>Parsing results and calculating metrics</summary>

```python
import argparse
import json
import re
from pathlib import Path
from typing import Tuple

import numpy as np


def read_jsonl_file(path: str) -> list:
    assert str(path).endswith(".jsonl")
    data_list = []
    with open(path, "r", encoding="utf-8") as file:
        for line in file:
            data = json.loads(line)
            data_list.append(data)
    return data_list


def write_json_file(data: dict | list, path: str) -> None:
    assert str(path).endswith(".json")
    with open(path, "w", encoding="utf-8") as outfile:
        json.dump(data, outfile, ensure_ascii=False, indent=4)


def postprocess_response_elam(response: str) -> Tuple[float, float]:
    """Parse Molmo-style point coordinates from string."""
    pattern = r'<point x="(?P<x>\d+\.\d+)" y="(?P<y>\d+\.\d+)"'
    match = re.search(pattern, response)
    if match:
        x_coord_raw = float(match.group("x"))
        y_coord_raw = float(match.group("y"))
        x_coord = x_coord_raw / 100
        y_coord = y_coord_raw / 100
        return [x_coord, y_coord]
    else:
        return [-1, -1]


def pred_center_in_gt(predicted_boxes, ground_truth_boxes):
    """Check whether each predicted point (or box center) lies inside the corresponding ground-truth box.

    Args:
        predicted_boxes (np.ndarray): shape (n, 2) of predicted points or (n, 4) of top-left bottom-right boxes
        ground_truth_boxes (np.ndarray): shape (n, 4) of top-left bottom-right boxes

    Returns:
        np.ndarray: boolean array, True where the predicted center lies inside the ground truth box,
        or -1 if no ground truth boxes are given
    """
    if ground_truth_boxes.size == 0:  # Check for empty numpy array just to be explicit
        return -1
    if predicted_boxes.shape[1] == 2:
        predicted_centers = predicted_boxes
    else:
        # Calculate the centers of the bounding boxes
        predicted_centers = (predicted_boxes[:, :2] + predicted_boxes[:, 2:]) / 2

    # Check if predicted centers are within ground truth boxes
    within_gt = (
        (predicted_centers[:, 0] >= ground_truth_boxes[:, 0])
        & (predicted_centers[:, 0] <= ground_truth_boxes[:, 2])
        & (predicted_centers[:, 1] >= ground_truth_boxes[:, 1])
        & (predicted_centers[:, 1] <= ground_truth_boxes[:, 3])
    )

    return within_gt


def to_mean_percent(metrics: list | np.ndarray) -> float:
    """Calculate mean of array and multiply by 100."""
    return np.mean(metrics) * 100


def calculate_alignment_numpy(array1, array2):
    """Return boolean arrays where predictions and ground truth are equal (overall, GT-True, GT-False)."""

    if array1.size == 0:  # Check for empty numpy array just to be explicit
        return [], [], []

    # Overall accuracy
    overall_hits = array1 == array2

    # Accuracy on samples with ground truth True
    true_ground_truth_indices = array2 == True  # Boolean mask for True ground truth
    true_ground_truth_predictions = array1[true_ground_truth_indices]
    true_ground_truth_actuals = array2[true_ground_truth_indices]

    true_gt_hits = true_ground_truth_predictions == true_ground_truth_actuals

    # Accuracy on samples with ground truth False
    false_ground_truth_indices = array2 == False  # Boolean mask for False ground truth
    false_ground_truth_predictions = array1[false_ground_truth_indices]
    false_ground_truth_actuals = array2[false_ground_truth_indices]

    false_gt_hits = false_ground_truth_predictions == false_ground_truth_actuals
    return overall_hits, true_gt_hits, false_gt_hits


def clip_non_minus_one(arr):
    """Clip values in a NumPy array to [0, 1] but leave -1 values unchanged."""
    # Create a boolean mask for values NOT equal to -1
    mask = arr != -1

    # Create a copy of the array to avoid modifying the original in-place
    clipped_arr = np.copy(arr)

    # Apply clipping ONLY to the elements where the mask is True
    clipped_arr[mask] = np.clip(clipped_arr[mask], 0, 1)

    return clipped_arr


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Parse model outputs and calculate metrics.")
    parser.add_argument(
        "-m", "--model_output_path", type=str, help="Path to JSONL file that contains model outputs from inference.", required=True
    )

    args = parser.parse_args()

    EVAL_PATH = args.model_output_path
    eval_jsonl_data = read_jsonl_file(EVAL_PATH)

    ta_pred_bboxes, ta_gt_bboxes = [], []
    er_pred_bboxes, er_gt_bboxes = [], []
    er_pred_conclusion, er_gt_conclusion = [], []
    ta_out_images, er_out_images = [], []
    failed_pred_responses = []

    er_en_ids = []
    ta_en_ids = []
    ta_de_ids = []
    er_de_ids = []

    for line in eval_jsonl_data:
        # Read data from line
        image_width, image_height = line["image_size"]
        gt_box = line["gt_box"]
        lang = line["language"]
        response_raw = line["output"]

        if "Test Action" == line["gt_class"]:
            # Parse point/box from response and clip to image
            parsed_response = postprocess_response_elam(response_raw)
            if parsed_response[0] == -1:
                failed_pred_responses.append({"sample_id": line["sample_id"], "response": response_raw})

            parsed_response = np.array(parsed_response)
            parsed_response = clip_non_minus_one(parsed_response).tolist()

            # Append results
            ta_gt_bboxes.append(gt_box)
            ta_pred_bboxes.append(parsed_response)
            if lang == "DE":
                ta_de_ids.append(len(ta_pred_bboxes) - 1)  # append id
            elif lang == "EN":
                ta_en_ids.append(len(ta_pred_bboxes) - 1)

        elif "Expected Result" in line["gt_class"]:
            er_gt_bboxes.append(gt_box)

            # Parse point/box from response and clip to image
            parsed_response = postprocess_response_elam(response_raw)
            if parsed_response[0] == -1:
                failed_pred_responses.append({"sample_id": line["sample_id"], "response": response_raw})
            parsed_response = np.array(parsed_response)
            parsed_response = clip_non_minus_one(parsed_response).tolist()
            er_pred_bboxes.append(parsed_response)

            # Read evaluation conclusion
            gt_conclusion = line["gt_status"].upper()
            gt_conclusion = True if gt_conclusion == "PASSED" else False

            pred_conclusion = None
            if "FAILED" in response_raw or "is not met" in response_raw:
                pred_conclusion = False
            elif "PASSED" in response_raw or "is met" in response_raw:
                pred_conclusion = True
            if pred_conclusion is None:
                # Make prediction wrong if it couldn't be parsed
                pred_conclusion = not gt_conclusion

            er_gt_conclusion.append(gt_conclusion)
            er_pred_conclusion.append(pred_conclusion)

            if lang == "DE":
                er_de_ids.append(len(er_pred_bboxes) - 1)
            elif lang == "EN":
                er_en_ids.append(len(er_pred_bboxes) - 1)

    ta_pred_bboxes = np.array(ta_pred_bboxes)
    ta_gt_bboxes = np.array(ta_gt_bboxes)
    er_pred_bboxes = np.array(er_pred_bboxes)
    er_gt_bboxes = np.array(er_gt_bboxes)
    er_pred_conclusion = np.array(er_pred_conclusion)
    er_gt_conclusion = np.array(er_gt_conclusion)
    print(f"{'Test action (pred/gt):':<{36}}{ta_pred_bboxes.shape}, {ta_gt_bboxes.shape}")
    print(f"{'Expected results (pred/gt):':<{36}}{er_pred_bboxes.shape}, {er_gt_bboxes.shape}")

    # Calculate metrics
    ta_pred_hits = pred_center_in_gt(ta_pred_bboxes, ta_gt_bboxes)
    score_ta = to_mean_percent(ta_pred_hits)

    er_pred_hits = pred_center_in_gt(er_pred_bboxes, er_gt_bboxes)
    score_er = to_mean_percent(er_pred_hits)

    overall_hits, true_gt_hits, false_gt_hits = calculate_alignment_numpy(er_pred_conclusion, er_gt_conclusion)
    score_conclusion = to_mean_percent(overall_hits)
    score_conclusion_gt_true = to_mean_percent(true_gt_hits)
    score_conclusion_gt_false = to_mean_percent(false_gt_hits)

    # Calculate language-specific metrics for TA
    score_ta_en = to_mean_percent(ta_pred_hits[ta_en_ids])
    score_ta_de = to_mean_percent(ta_pred_hits[ta_de_ids])

    # Calculate language-specific metrics for ER (bbox)
    score_er_en = to_mean_percent(er_pred_hits[er_en_ids])
    score_er_de = to_mean_percent(er_pred_hits[er_de_ids])

    # Calculate language-specific metrics for ER (conclusion)
    score_conclusion_en = to_mean_percent(overall_hits[er_en_ids])
    score_conclusion_de = to_mean_percent(overall_hits[er_de_ids])

    print(f"\n{'Test action visual grounding:':<{36}}{score_ta:.1f}")
    print(f"{'Expected result visual grounding:':<{36}}{score_er:.1f}")
    print(f"{'Expected result evaluation:':<{36}}{score_conclusion:.1f}\n")

    eval_out_path = Path(EVAL_PATH).parent / "eval_results.json"

    write_json_file(
        {
            "score_ta": score_ta,
            "score_ta_de": score_ta_de,
            "score_ta_en": score_ta_en,
            "score_er": score_er,
            "score_er_de": score_er_de,
            "score_er_en": score_er_en,
            "score_er_conclusion": score_conclusion,
            "score_er_conclusion_de": score_conclusion_de,
            "score_er_conclusion_en": score_conclusion_en,
            "score_conclusion_gt_true": score_conclusion_gt_true,
            "score_conclusion_gt_false": score_conclusion_gt_false,
        },
        path=eval_out_path,
    )
    print(f"Stored results at {eval_out_path}")

    if failed_pred_responses:
        failed_responses_out_path = Path(EVAL_PATH).parent / "failed_responses.json"
        write_json_file(failed_pred_responses, failed_responses_out_path)
        print(f"Stored non-parsable responses at {failed_responses_out_path}")
```
</details>

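To reproduce the ELAM-7B scores reported below, the inference snippet is run first and the resulting `eval_output/<timestamp>/model_outputs.jsonl` is passed to the metrics script via `-m`/`--model_output_path`; `eval_results.json` is then written next to it. The grounding metrics count a prediction as a hit when the predicted point (or box center) falls inside the ground-truth box. Below is a minimal sketch of that metric, meant to be run after the helper functions from the metrics script above are defined; the toy numbers are made up.

```python
# Toy example of the grounding metric using pred_center_in_gt and to_mean_percent
# from the metrics script above (relative coordinates, boxes as [x1, y1, x2, y2]).
import numpy as np

pred_points = np.array([[0.52, 0.31],    # inside the first box  -> hit
                        [0.90, 0.90]])   # outside the second box -> miss
gt_boxes = np.array([[0.40, 0.25, 0.60, 0.40],
                     [0.10, 0.10, 0.30, 0.30]])

hits = pred_center_in_gt(pred_points, gt_boxes)
print(to_mean_percent(hits))  # 50.0
```
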
## Results

@@ -87,3 +482,7 @@ Key Features:
| Molmo-7B-D-0924 | 71.3 | 71.4 | 66.9 |
| LAM-270M (TinyClick) | 73.9 | 59.9 | - |
| ELAM-7B (Molmo) | **87.6** | **77.5** | **78.2** |

# Acknowledgements
## Funding
This work was supported by the German Federal Ministry of Education and Research (BMBF) within the scope of the project "KI4BoardNet".