Jietson committed (verified)
Commit f020aef · 1 Parent(s): cdf4db2

Upload 8 files
README.md CHANGED
@@ -1,3 +1,72 @@
1
  ---
2
  dataset_info:
3
  features:
@@ -19,7 +88,7 @@ dataset_info:
19
  dtype: string
20
  - name: options
21
  list: string
22
- splits:
23
  - name: info
24
  num_bytes: 9389294399.0
25
  num_examples: 55091
@@ -32,9 +101,9 @@ dataset_info:
32
  - name: visual_basic
33
  num_bytes: 1254942699.466
34
  num_examples: 7297
35
- download_size: 20376840742
36
- dataset_size: 26739208377.466
37
- configs:
38
  - config_name: default
39
  data_files:
40
  - split: info
@@ -45,4 +114,5 @@ configs:
45
  path: data/visual_metaphor-*
46
  - split: visual_basic
47
  path: data/visual_basic-*
 
48
  ---
 
1
+ # InfoChartQA: Benchmark for Multimodal Question Answering on Infographic Charts
2
+ 🤗[Dataset](https://huggingface.co/datasets/Jietson/InfoChartQA)
3
+
4
+ # Dataset
5
+ You can find our dataset on Hugging Face: 🤗[InfoChartQA Dataset](https://huggingface.co/datasets/Jietson/InfoChartQA)
6
+
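+ A minimal loading sketch (assuming the Hugging Face `datasets` library; the split names follow the dataset card below):
+
+ ```python
+ from datasets import load_dataset
+
+ # load one split of InfoChartQA, e.g. the visual_basic questions
+ ds = load_dataset("Jietson/InfoChartQA", split="visual_basic")
+ print(ds[0]["question"], ds[0]["answer"])
+ ```
+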
7
+ # Usage
8
+
9
+ Each question entry is arranged as:
10
+
11
+ ```
12
+ --question_id: int
13
+ --qtype: int
14
+ --figure_path: image
15
+ --visual_figure_path: list of image
16
+ --question: str
17
+ --answer: str
18
+ --instructions: str
19
+ --prompt: str
20
+ --options: dict of options ("A"/"B"/"C"/"D" -> option_content)
21
+ ```
22
+
23
+ Each question is built as:
24
+
25
+ ```
26
+ image_input: figure_path, visual_figure_path_1...visual_figure_path_n (if any)
27
+ text_input: prompt (if any) + question + options (if any) + instructions (if any)
28
+ ```
29
+
30
+ # Evaluate
31
+
32
+ You should store the model's responses and evaluate them as follows:
33
+
34
+ ```python
35
+ import json
+
+ # Example code for evaluation
36
+ def build_question(query):  # build the question text for one entry
37
+     question = ""
38
+     if "prompt" in query:
39
+         question = question + f"{query['prompt']}\n"
40
+     question = question + f"{query['question']}\n"
41
+     if "options" in query:
42
+         for _ in query["options"]:
43
+             question = question + f"{_} {query['options'][_]}\n"
44
+     if "instructions" in query:
45
+         question = question + query["instructions"]
46
+     return question
47
+
48
+ with open("visual_basic.json", "r", encoding="utf-8") as f:
49
+     queries = json.load(f)
50
+
51
+ for idx in range(len(queries)):
52
+     question = build_question(queries[idx])
53
+     figure_path = [queries[idx]['figure_path']]
54
+     visual_figure_path = queries[idx]['visual_figure_path']
55
+
56
+     response = model.generate(question, figure_path + visual_figure_path)  # generate your model's response from the question text and the images (pseudo-code)
57
+
58
+     queries[idx]["response"] = response
59
+
60
+ with open("model_response.json", "w", encoding="utf-8") as f:
61
+     json.dump(queries, f)
62
+ from checker import evaluate
63
+ evaluate("model_response.json", "path_to_save_the_result")
64
+ ```
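+
+ The `evaluate` helper in `eval/checker.py` (signature `evaluate(input_file, output_file=None, stats_file=None)`) can also write an aggregate accuracy report (overall, per qtype and per figure). A sketch of that call, with hypothetical output paths:
+
+ ```python
+ # per-question scores go to result.json, aggregate accuracy to stats.json
+ evaluate("model_response.json", "result.json", stats_file="stats.json")
+ ```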
65
+
66
+
67
+
68
+
69
+
70
  ---
71
  dataset_info:
72
  features:
 
88
  dtype: string
89
  - name: options
90
  list: string
91
+ splits:
92
  - name: info
93
  num_bytes: 9389294399.0
94
  num_examples: 55091
 
101
  - name: visual_basic
102
  num_bytes: 1254942699.466
103
  num_examples: 7297
104
+ download_size: 20376840742
105
+ dataset_size: 26739208377.466
106
+ configs:
107
  - config_name: default
108
  data_files:
109
  - split: info
 
114
  path: data/visual_metaphor-*
115
  - split: visual_basic
116
  path: data/visual_basic-*
117
+
118
  ---
eval/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .checker import evaluate
eval/checker.py ADDED
@@ -0,0 +1,183 @@
1
+ import json
2
+ import os
3
+ from collections import defaultdict
4
+ from compare_value import compare_value
5
+ from compare_sequence import is_sequence_match_ordered, is_sequence_match_unordered
6
+ from compare_str import fuzzy_string_match
7
+ from compare_multiple import multiple_choice_checker
8
+
9
+ def has_more_digits_than_other_chars(s):
10
+ if isinstance(s, (int, float)):
11
+ return True
12
+ s = s.replace('.', '1')
13
+ s = s.replace(',', '1')
14
+ s = s.replace('$', '1')
15
+ s = s.replace('B', '1')
16
+ s = s.replace('T', '1')
17
+ s = s.replace('K', '1')
18
+ digit_count = 0
19
+ other_count = 0
20
+ for char in s:
21
+ if char.isdigit():
22
+ digit_count += 1
23
+ else:
24
+ other_count += 1
25
+
26
+ return digit_count > other_count
27
+
28
+
29
+ def evaluate_answer(answer, response, qtype):
30
+ if qtype in [1, 2, 101, 102]:
31
+ if has_more_digits_than_other_chars(answer):
32
+ return "Exact Numeric", compare_value(answer, response)
33
+ else:
34
+ return "Vague String", fuzzy_string_match(answer, response)
35
+ elif qtype in [72, 54]:
36
+ return "Exact Numeric", compare_value(answer, response)
37
+ elif qtype in [10, 50, 51, 52, 110]:
38
+ return "Vague Numeric", compare_value(answer, response, eps=0.05)
39
+ elif qtype in [13, 103, 113]:
40
+ if answer.lower() in response.lower():
41
+ return "Exact String", True
42
+ return "Exact String", compare_value(answer, response)
43
+ elif qtype in [40, 41, 42, 43, 44]:
44
+ response = response.replace("\n", ",")
45
+ response = response.replace(" ", "")
46
+ answer = answer.replace(" ", "")
47
+ return "Vague Unordered Sequence", is_sequence_match_unordered(answer.split(","), response.split(","), fuzzy=True)
48
+ elif qtype in [60, 61, 70, 80, 90]:
49
+ return "Vague String", fuzzy_string_match(answer, response)
50
+ elif qtype in [71]:
51
+ response = response.replace("\n\n", "")
52
+ response = response.replace("\n", ",")
53
+ response = response.replace(" ", "")
54
+ response = response.replace("<", ",")
55
+ response = response.replace(">", ",")
56
+ if response.count(":") == 1:
57
+ response = response[response.find(':') + 1:]
58
+ answer = answer.replace(" ", "")
59
+ return "Vague Ordered Sequence", is_sequence_match_ordered(answer.split(","), response.split(","), fuzzy=True)
60
+ elif qtype in [30]:
61
+ for an in answer:
62
+ if is_sequence_match_ordered(an.split(","), response.split(","), fuzzy=True):
63
+ return "Vague Ordered Sequence", True
64
+ return "Vague Ordered Sequence", False
65
+ elif qtype in [202,1919810,1919811,1919812]:
66
+ return "Exact String", multiple_choice_checker(answer , response)
67
+ else:
68
+ print('unknown qtype:', qtype)
69
+ return "Exact Numeric", compare_value(answer, response)
70
+
71
+
72
+ def process_json_data(json_data):
73
+ results = []
74
+ stats = {
75
+ 'qtype_stats': defaultdict(lambda: {'correct': 0, 'total': 0}),
76
+ 'figure_stats': defaultdict(lambda: {'correct': 0, 'total': 0}),
77
+ 'total_correct': 0,
78
+ 'total_questions': 0
79
+ }
80
+
81
+ for key, item in json_data.items():
82
+ question_id = item["question_id"]
83
+ if 'qtype' in item:
84
+ qtype = item["qtype"]
85
+ elif 'qid' in item:
86
+ qtype = item["qid"]
87
+ else:
88
+ qtype = 1
89
+ if "response" not in item or item['response'] == 'Error!':
90
+ continue
91
+
92
+ answer = str(item["answer"])
93
+ response = str(item["response"])
94
+ response = response.replace(" "," ")
95
+
96
+
97
+ figure_path = item["figure_path"]
98
+ if type(figure_path) == list:
99
+ figure_path = figure_path[0]
100
+
101
+ eval_method, score = evaluate_answer(answer, response, qtype)
102
+
103
+ results.append({
104
+ "figure_path": figure_path,
105
+ "answer": answer,
106
+ "response": response,
107
+ "question": item["question"] if "question" in item else "",
108
+ "question_id": question_id,
109
+ "qtype": qtype,
110
+ "score": score,
111
+ "eval_method": eval_method
112
+ })
113
+
114
+ stats['qtype_stats'][qtype]['correct'] += score
115
+ stats['qtype_stats'][qtype]['total'] += 1
116
+
117
+ stats['figure_stats'][figure_path]['correct'] += score
118
+ stats['figure_stats'][figure_path]['total'] += 1
119
+
120
+ stats['total_correct'] += score
121
+ stats['total_questions'] += 1
122
+
123
+ return results, stats
124
+
125
+
126
+ def calculate_accuracy(correct, total):
127
+ return round(correct / total * 100, 2) if total > 0 else 0.0
128
+
129
+
130
+ def generate_stat_report(stats):
131
+ report = {}
132
+
133
+ report['overall_accuracy'] = calculate_accuracy(
134
+ stats['total_correct'], stats['total_questions'])
135
+
136
+ qtype_report = {}
137
+ for qtype, counts in stats['qtype_stats'].items():
138
+ qtype_report[f"qtype_{qtype}"] = {
139
+ 'accuracy': calculate_accuracy(counts['correct'], counts['total']),
140
+ 'correct': counts['correct'],
141
+ 'total': counts['total']
142
+ }
143
+ report['qtype_accuracy'] = qtype_report
144
+
145
+ figure_report = {}
146
+ for figure_path, counts in stats['figure_stats'].items():
147
+ figure_report[figure_path] = {
148
+ 'accuracy': calculate_accuracy(counts['correct'], counts['total']),
149
+ 'correct': counts['correct'],
150
+ 'total': counts['total']
151
+ }
152
+ report['figure_accuracy'] = figure_report
153
+
154
+ return report
155
+
156
+ from copy import deepcopy
157
+ def evaluate(input_file, output_file=None, stats_file=None):
158
+ if output_file and os.path.dirname(output_file):  # only create a directory when one is given
+     os.makedirs(os.path.dirname(output_file), exist_ok=True)
159
+ with open(input_file, 'r', encoding='utf-8') as f:
160
+ data = json.load(f)
161
+ if type(data).__name__=='list':
162
+ __ = deepcopy(data)
163
+ data = {}
164
+ for _ in __:
165
+ data[_['question_id']] = deepcopy(_)
166
+
167
+
168
+ results, stats = process_json_data(data)
169
+ report = generate_stat_report(stats)
170
+
171
+ if output_file:
172
+ with open(output_file, 'w', encoding='utf-8') as f:
173
+ json.dump(results, f, indent=2, ensure_ascii=False)
174
+ print(f"Scores saved to {output_file}")
175
+
176
+ if stats_file:
177
+ with open(stats_file, 'w', encoding='utf-8') as f:
178
+ json.dump(report, f, indent=2, ensure_ascii=False)
179
+ print(f"Statistics saved to {stats_file}")
180
+
181
+ print(f"Accuracy: {report['overall_accuracy']}% over {stats['total_questions']} questions")
182
+
183
+ return results, report
eval/compare_multiple.py ADDED
@@ -0,0 +1,74 @@
1
+ import numpy as np
2
+ import random
3
+ def parse_multi_choice_response(response, all_choices, index2ans):
4
+ """
5
+ Parse the prediction from the generated response.
6
+ Return the predicted index e.g., A, B, C, D.
7
+ """
8
+ for char in [',', '.', '!', '?', ';', ':', "'"]:
9
+ response = response.strip(char)
10
+ response = " " + response + " " # add space to avoid partial match
11
+
12
+ index_ans = True
13
+ ans_with_brack = False
14
+ candidates = []
15
+ for choice in all_choices: # e.g., (A) (B) (C) (D)
16
+ if f'({choice})' in response:
17
+ candidates.append(choice)
18
+ ans_with_brack = True
19
+ if len(candidates) == 0:
20
+ for choice in all_choices: # e.g., A B C D
21
+ if f' {choice} ' in response:
22
+ candidates.append(choice)
23
+
24
+ # if the above finds no candidates and the response is longer than 5 tokens, try matching the option contents instead
25
+ if len(candidates) == 0 and len(response.split()) > 5:
26
+ for index, ans in index2ans.items():
27
+ if ans.lower() in response.lower():
28
+ candidates.append(index)
29
+ index_ans = False # it's content ans.
30
+
31
+ # print("haha",candidates)
32
+ if len(candidates) == 0: # still no answer: use a unique placeholder that will never match the ground truth
33
+ pred_index = 'None' + str(random.randint(0,255))
34
+ elif len(candidates) > 1:
35
+ start_indexes = []
36
+ if index_ans:
37
+ if ans_with_brack:
38
+ for can in candidates:
39
+ index = response.rfind(f'({can})')
40
+ start_indexes.append(index) # -1 will be ignored anyway
41
+ # start_indexes = [generated_response.index(f'({can})') for can in candidates]
42
+ else:
43
+ for can in candidates:
44
+ index = response.rfind(f" {can} ")
45
+ start_indexes.append(index)
46
+ else:
47
+ for can in candidates:
48
+ index = response.lower().rfind(index2ans[can].lower())
49
+ start_indexes.append(index)
50
+ # get the last one
51
+ pred_index = candidates[np.argmax(start_indexes)]
52
+ else: # if only one candidate, use it.
53
+ pred_index = candidates[0]
54
+
55
+ return pred_index
56
+
57
+
58
+
59
+ def multiple_choice_checker(
60
+ gt : str,
61
+ pred : str
62
+ ) -> bool:
63
+ if len(pred) > 50:
64
+ pred = pred[-10:]
65
+ pred = pred.split(',')
66
+ gt = gt.upper().split(',')
67
+
68
+ # pred = (#)
69
+ jury = set([parse_multi_choice_response(_, ["A","B","C","D"] , {}) for _ in pred])
70
+ std = set([parse_multi_choice_response(_, ["A", "B", "C", "D"], {}) for _ in gt])
71
+ # print(jury,std)
72
+ return jury == std
73
+ if __name__ == "__main__":
74
+ print(multiple_choice_checker('B',"According to Figure 1, which details \"Iraq's bloody toll\" with a focus on the massive number of civilian and other deaths, the predominant theme is one of immense loss of life and tragedy. The graph visually represents the scale of death over several years. Let's analyze the options in this context:\n\n(A) The fear of blood: While \"bloody toll\" is in the title and red is a dominant color, the graph itself quantifies deaths. Fear of blood is a specific phobia or immediate reaction to violence, but the graph presents the aftermath and scale of death, which is broader than just the fear of blood.\n\n(B) The grief for death: Figure 1 overwhelmingly presents statistics about death. The sheer number of casualties (e.g., 113,726 civilian deaths) directly points to widespread loss. Grief is the natural and profound emotional response to such extensive death. The purpose of such a graphic is often to convey the human cost, which is closely associated with grief.\n\n(C) The amazement at quantity: The numbers are indeed shockingly large, and one might be amazed or stunned by the scale. However, \"amazement\" alone doesn't fully capture the emotional gravity of what these quantities represent – human lives lost. It's a more intellectual or initial reaction.\n\n(D) The indifference of society: Figure 1, by its nature as an informative graphic likely published to raise awareness, aims to combat indifference rather than depict it as a predominant emotion stemming from the facts. It highlights a serious issue, implicitly calling for attention and concern.\n\nConsidering the central message of Figure 1 is the staggering number of deaths and the tragic human cost of the conflict, the most predominant emotion that Figure 1 would suggest for a related Figure 2 (which would presumably depict the human aspect of this toll) is grief. The data in Figure 1 is a quantifiable representation of events that would cause widespread grief.\n\nThe final answer is $\\ B $."))
eval/compare_sequence.py ADDED
@@ -0,0 +1,270 @@
1
+ import json
2
+ from typing import List, Hashable, Union
3
+ from collections import Counter
4
+
5
+
6
+ def is_sequence_valid(sequence: List[Union[Hashable, str]],
7
+ case_sensitive: bool = False,
8
+ strip_spaces: bool = True,
9
+ fuzzy_duplicates: bool = False,
10
+ fuzzy_threshold: float = 0.6) -> bool:
11
+ """
12
+ Check whether a sequence is valid (i.e. contains no duplicate elements).
13
+
14
+ Args:
15
+ sequence: the sequence to check
16
+ case_sensitive: whether comparison is case-sensitive (strings only)
17
+ strip_spaces: whether to strip leading/trailing spaces from strings
18
+ fuzzy_duplicates: whether to enable fuzzy duplicate detection (strings only)
19
+ fuzzy_threshold: fuzzy-match threshold (0-1)
20
+
21
+ Returns:
22
+ bool: True if there are no duplicates (valid), False if there are duplicates (invalid)
23
+
24
+ Examples:
25
+ >>> is_sequence_valid(["A", "B", "C"]) # True
26
+ >>> is_sequence_valid(["A", "a"], case_sensitive=False) # False
27
+ >>> is_sequence_valid([" apple ", "apple"]) # False
28
+ """
29
+ if not sequence:
30
+ return True
31
+
32
+ processed = []
33
+ # print(sequence)
34
+ for item in sequence:
35
+ # print(item)
36
+ if isinstance(item, str):
37
+ # preprocess string items
38
+ processed_item = item
39
+ if not case_sensitive:
40
+ processed_item = processed_item.lower()
41
+ if strip_spaces:
42
+ processed_item = processed_item.strip()
43
+ processed.append(processed_item)
44
+ else:
45
+ processed.append(item)
46
+
47
+ # standard check (exact match)
48
+ # print(processed)
49
+ if not fuzzy_duplicates:
50
+ return len(processed) == len(set(processed))
51
+
52
+ # fuzzy duplicate-detection mode
53
+ for i in range(len(processed)):
54
+ for j in range(i + 1, len(processed)):
55
+ if isinstance(processed[i], str) and isinstance(processed[j], str):
56
+ # fuzzy match using difflib
57
+ from difflib import SequenceMatcher
58
+ similarity = SequenceMatcher(None, processed[i], processed[j]).ratio()
59
+ if similarity >= fuzzy_threshold:
60
+ return False
61
+ else:
62
+ # non-string items fall back to exact comparison
63
+ if processed[i] == processed[j]:
64
+ return False
65
+ return True
66
+
67
+
68
+ def extract_answers_from_file(file_path):
69
+ """
70
+ Read a JSON file and extract the answer sequences.
71
+
72
+ Args:
73
+ file_path: str - path to the JSON file
74
+
75
+ Returns:
76
+ dict - a dict containing the extracted sequences and related metadata
77
+ """
78
+ try:
79
+ # read the JSON file
80
+ with open(file_path, 'r', encoding='utf-8') as f:
81
+ input_data = json.load(f)
82
+
83
+ # initialize the result dict
84
+ result = {
85
+ "sequences": [],
86
+ "details": []
87
+ }
88
+
89
+ # iterate over every entry
90
+ for key, item in input_data.items():
91
+ # skip entries that have no answer field
92
+ if 'answer' not in item:
93
+ continue
94
+
95
+ # split the answer on commas into a sequence, stripping surrounding whitespace
96
+ answer_sequence = [x.strip() for x in str(item['answer']).split(',')]
97
+
98
+ # store the sequence and related metadata
99
+ result["sequences"].append(answer_sequence)
100
+ result["details"].append({
101
+ "question_id": item.get("question_id", ""),
102
+ "figure_path": item.get("figure_path", ""),
103
+ "qtype": item.get("qtype", -1),
104
+ "question": item.get("question", ""),
105
+ "sequence_length": len(answer_sequence)
106
+ })
107
+
108
+ return result
109
+
110
+ except FileNotFoundError:
111
+ print(f"Error: file {file_path} not found")
112
+ return None
113
+ except json.JSONDecodeError:
114
+ print("Error: file content is not valid JSON")
115
+ return None
116
+ except Exception as e:
117
+ print(f"Error while processing the file: {str(e)}")
118
+ return None
119
+
120
+
121
+ from difflib import SequenceMatcher
122
+ from typing import List, Union, Optional
123
+
124
+
125
+ def fuzzy_match(s1: str, s2: str, threshold: float = 0.6) -> bool:
126
+ """
127
+ Fuzzy string match (based on a similarity threshold).
128
+ :param s1: first string
129
+ :param s2: second string
130
+ :param threshold: similarity threshold (0-1)
131
+ :return: whether the strings match
132
+ """
133
+ flag = False
134
+ flag |= SequenceMatcher(None, s1.lower().strip(), s2.lower().strip()).ratio() >= threshold
135
+ flag |= s1 in s2
136
+ flag |= s2 in s1
137
+ # print(s1 , s2 , SequenceMatcher(None, s1.lower().strip(), s2.lower().strip()).ratio(),flag)
138
+ return flag
139
+
140
+
141
+ def is_sequence_match_ordered(
142
+ seq1: List[str],
143
+ seq2: List[str],
144
+ fuzzy: bool = False,
145
+ threshold: float = 0.6
146
+ ) -> bool:
147
+ """
148
+ Check whether two sequences match exactly, in order.
149
+ :param seq1: first sequence
150
+ :param seq2: second sequence
151
+ :param fuzzy: whether to enable fuzzy matching
152
+ :param threshold: fuzzy-match threshold
153
+ :return: whether the sequences match
154
+ """
155
+ if len(seq1) != len(seq2):
156
+ return False
157
+
158
+ if not is_sequence_valid(seq1, case_sensitive=True):
159
+ return False
160
+
161
+ if not is_sequence_valid(seq2, case_sensitive=True):
162
+ return False
163
+
164
+ # print(seq1 , seq2)
165
+ if fuzzy:
166
+ return all(fuzzy_match(x, y, threshold) for x, y in zip(seq1, seq2))
167
+ else:
168
+ return all(x.strip().lower() == y.strip().lower() for x, y in zip(seq1, seq2))
169
+
170
+
171
+ def is_sequence_match_unordered(
172
+ seq1: List[str],
173
+ seq2: List[str],
174
+ fuzzy: bool = False,
175
+ threshold: float = 0.8
176
+ ) -> bool:
177
+ """
178
+ Check whether two sequences contain the same elements (order ignored).
179
+ :param seq1: first sequence
180
+ :param seq2: second sequence
181
+ :param fuzzy: whether to enable fuzzy matching
182
+ :param threshold: fuzzy-match threshold
183
+ :return: whether the sequences match
184
+ """
185
+ if len(seq1) != len(seq2):
186
+ return False
187
+
188
+ seq1_processed = [s.lower().strip() for s in seq1]
189
+ seq2_processed = [s.lower().strip() for s in seq2]
190
+
191
+ if fuzzy:
192
+ # build a best-effort one-to-one matching between the two sequences
193
+ matched_indices = set()
194
+ for i, s1 in enumerate(seq1):
195
+ for j, s2 in enumerate(seq2):
196
+ if j not in matched_indices and fuzzy_match(s1, s2, threshold):
197
+ matched_indices.add(j)
198
+ break
199
+ return len(matched_indices) == len(seq1)
200
+ else:
201
+ return sorted(seq1_processed) == sorted(seq2_processed)
202
+
203
+
204
+ # test cases
205
+ if __name__ == "__main__":
206
+ A = "Russia, DR Congo, Ethiopia, Bangladesh, Iraq, Yemen, Pakistan, India"
207
+ B = "Russia: 2 \nD.R. Congo: 3 \nEthiopia: 5 \nBangladesh: 5 \nIraq: 7 \nYemen: 7 \nPakistan: 12 \nIndia: 134"
208
+ B = B.replace("\n", ",")
209
+ B = B.replace(" ", "")
210
+ A = A.replace(" ", "")
211
+ print(is_sequence_match_ordered(A.split(","), B.split(","), fuzzy=True))
212
+
213
+ # test data
214
+ exact_ordered = ["Apple", "Banana", "Orange"]
215
+ exact_unordered = ["Banana", "Orange", "Apple"]
216
+ fuzzy_ordered = [" Apple ", "banana", "Orang"]
217
+ fuzzy_unordered = ["banan", "orang", " apple"]
218
+
219
+ # exact ordered-match tests
220
+ print("Exact ordered match:")
221
+ print(exact_ordered, exact_ordered, is_sequence_match_ordered(exact_ordered, exact_ordered)) # True
222
+ print(exact_ordered, exact_unordered, is_sequence_match_ordered(exact_ordered, exact_unordered)) # False
223
+
224
+ # exact unordered-match tests
225
+ print("\nExact unordered match:")
226
+ print(exact_ordered, exact_unordered, is_sequence_match_unordered(exact_ordered, exact_unordered)) # True
227
+ print(exact_ordered, ["Apple", "Banana"], is_sequence_match_unordered(exact_ordered, ["Apple", "Banana"])) # False
228
+
229
+ # fuzzy ordered-match tests
230
+ print("\nFuzzy ordered match:")
231
+ print(exact_ordered, fuzzy_ordered, is_sequence_match_ordered(exact_ordered, fuzzy_ordered, fuzzy=True)) # True
232
+ print(exact_ordered, fuzzy_unordered,
233
+ is_sequence_match_ordered(exact_ordered, fuzzy_unordered, fuzzy=True)) # False
234
+
235
+ # fuzzy unordered-match tests
236
+ print("\nFuzzy unordered match:")
237
+ print(exact_ordered, fuzzy_unordered,
238
+ is_sequence_match_unordered(exact_ordered, fuzzy_unordered, fuzzy=True)) # True
239
+ print(exact_ordered, ["App", "Banan"],
240
+ is_sequence_match_unordered(exact_ordered, ["App", "Banan"], fuzzy=True)) # False
241
+
242
+ answer = "Trondheim,Munich,TheHague,Muscat,RasAlKhaimah,Dubai,Taipei,Doha,Ajman,AbuDhabi"
243
+ response = "Trondheim,Munich,TheHague,Muscat,RasAlKhaimah,Dubai,Taipei,Doha,Ajman,AbuDhabi"
244
+ print(is_sequence_match_ordered(answer.split(","), response.split(","), fuzzy=True))
245
+
246
+ assert is_sequence_valid(["A", "B", "C"]) == True
247
+ assert is_sequence_valid(["A", "A"]) == False
248
+
249
+ # case-sensitivity tests
250
+ assert is_sequence_valid(["A", "a"], case_sensitive=False) == False
251
+ assert is_sequence_valid(["A", "a"], case_sensitive=True) == True
252
+
253
+ # whitespace-handling tests
254
+ assert is_sequence_valid(["apple", " apple "]) == False
255
+ assert is_sequence_valid(["apple", " apple "], strip_spaces=False) == True
256
+
257
+ # fuzzy-matching tests
258
+ assert is_sequence_valid(["apple", "applee"], fuzzy_duplicates=True) == False
259
+ assert is_sequence_valid(["apple", "aple"], fuzzy_duplicates=True, fuzzy_threshold=0.8) == False
260
+ assert is_sequence_valid(["apple", "orange"], fuzzy_duplicates=True) == True
261
+
262
+ # mixed-type tests
263
+ assert is_sequence_valid([1, "1"]) == True
264
+ assert is_sequence_valid([1, 1]) == False
265
+
266
+ # edge cases
267
+ assert is_sequence_valid([]) == True
268
+ assert is_sequence_valid([None, None]) == False
269
+
270
+
eval/compare_str.py ADDED
@@ -0,0 +1,91 @@
1
+ def is_string_match(answer, response, case_sensitive=False):
2
+ """
3
+ Decide whether two strings match, with support for fuzzy substring matching.
4
+
5
+ Args:
6
+ answer: the expected string
7
+ response: the string to check
8
+ case_sensitive: whether matching is case-sensitive (default: no)
9
+
10
+ Returns:
11
+ bool: whether the strings match
12
+ """
13
+ if not case_sensitive:
14
+ answer = answer.lower()
15
+ response = response.lower()
16
+
17
+ # 1. exact match
18
+ if answer == response:
19
+ return True
20
+
21
+ # 2. answer is a substring of response
22
+ if answer in response:
23
+ return True
24
+
25
+ # 3. fuzzy match: allow a small number of mismatched characters (simple implementation)
26
+ # a simple subsequence check; replace with a more sophisticated fuzzy-matching algorithm if needed
27
+ len_answer = len(answer)
28
+ len_response = len(response)
29
+
30
+ # answer is longer than response, so it cannot be a substring
31
+ if len_answer > len_response:
32
+ return False
33
+
34
+ # simple subsequence check (allows a few mismatched characters in between)
35
+ i = j = 0
36
+ mismatch_count = 0
37
+ max_mismatch = max(1, len_answer // 4) # allow up to 25% of the characters to mismatch
38
+
39
+ while i < len_answer and j < len_response:
40
+ if answer[i] == response[j]:
41
+ i += 1
42
+ j += 1
43
+ else:
44
+ j += 1
45
+ mismatch_count += 1
46
+ if mismatch_count > max_mismatch:
47
+ return False
48
+
49
+ return i == len_answer
50
+
51
+
52
+ # a more robust fuzzy-matching version (using difflib)
53
+ from difflib import SequenceMatcher
54
+
55
+ def fuzzy_string_match(answer, response, threshold=0.8, case_sensitive=False):
56
+ """
57
+ Fuzzy matching using difflib.
58
+
59
+ Args:
60
+ threshold: similarity threshold (0-1)
61
+ """
62
+ if not case_sensitive:
63
+ answer = answer.lower()
64
+ response = response.lower()
65
+
66
+ # exact match
67
+ if answer == response:
68
+ return True
69
+
70
+ # substring match
71
+ if answer in response:
72
+ return True
73
+
74
+ # fuzzy match
75
+ similarity = SequenceMatcher(None, answer, response).ratio()
76
+ return similarity >= threshold
77
+
78
+ # Example application in a question-answering setting
79
+ # qa_pairs = [
80
+ # {"answer": "artificial intelligence", "response": "AI (artificial intelligence) is the future trend"},
81
+ # {"answer": "Python", "response": "We program in python"},
82
+ # {"answer": "machine learning", "response": "Deep learning is a branch of machine learning"},
83
+ # {"answer": "42", "response": "The answer is 42"},
84
+ # {"answer": "hello", "response": "hi there"}
85
+ # ]
86
+
87
+ # print("\nQA matching results:")
88
+ # for pair in qa_pairs:
89
+ # matched = fuzzy_string_match(pair["answer"], pair["response"])
90
+ # print(f"Answer: '{pair['answer']}' | Response: '{pair['response']}' → "
91
+ # f"{'match' if matched else 'no match'}")
eval/compare_value.py ADDED
@@ -0,0 +1,291 @@
1
+ import re
2
+ from compare_str import fuzzy_string_match
3
+ unit_map = {
4
+ 'K': 1e3,
5
+ 'k': 1e3,
6
+ 'M': 1e6, # million
7
+ 'm': 1e6, # million
8
+ 'million': 1e6, # million
9
+ 'bn': 1e9, # billion
10
+ 'Bn': 1e9, # billion
11
+ 'b': 1e9, # billion
12
+
13
+ 'B': 1e9, # billion
14
+ 'T': 1e12,
15
+ "%": 1e-2,
16
+ "Cr": 1e8,
17
+ "None": 1,
18
+ "Billion": 1e9
19
+ }
20
+
21
+ def extract_numbers_keep_order(text):
22
+ matches = []
23
+
24
+ # 1. numbers containing commas (must not contain a decimal point)
25
+ for m in re.finditer(r'-?(?:\d+,)+\d+', text):
26
+ if '.' not in m.group():
27
+ matches.append((m.start(), m.group()))
28
+
29
+ # 2. decimals (must not contain commas)
30
+ for m in re.finditer(r'-?\d+\.\d+', text):
31
+ if ',' not in m.group():
32
+ matches.append((m.start(), m.group()))
33
+
34
+ # 3. plain integers (no dot or comma, and not nested inside an existing match)
35
+ for m in re.finditer(r'(?<![\d.,])-?\d+(?![\d.,])', text):
36
+ val = m.group()
37
+ start = m.start()
38
+ # skip values contained in an existing match
39
+ if '.' not in val and ',' not in val and all(not (start >= s and start < s + len(v)) for s, v in matches):
40
+ matches.append((start, val))
41
+
42
+ # sort by order of appearance
43
+ matches.sort()
44
+
45
+ # return the matched values
46
+ return [v for _, v in matches]
47
+
48
+ def get_last_number(value):
49
+ numbers = extract_numbers_keep_order(value)
50
+ if len(numbers) == 0:
51
+ return None
52
+ value = numbers[-1]
53
+
54
+ if value.count('.') > 0:
55
+ if value.count('.') == 1:
56
+ return convert(value)
57
+ if only_digits_and_commas(value, '.'):
58
+ return convert(value.replace(".",""))
59
+ return value
60
+ elif value.count(',') > 0:
61
+ if only_digits_and_commas(value, ','):
62
+ return convert(value.replace(",",""))
63
+ if value.count(',') == 1:
64
+ return convert(value.replace(',','.'))
65
+ return value
66
+ return value
67
+
68
+ def only_digits_and_commas(s, divide):
69
+ res1 = False
70
+ res2 = False
71
+ if divide == ",":
72
+ pattern = r'^\d{1,3}(,\d{3})*$'
73
+ if bool(re.fullmatch(r'[0-9,]+', s)):
74
+ res1 = True
75
+ if is_valid_thousand_separator(s, divide):
76
+ res2 = True
77
+ elif divide == ".":
78
+ pattern = r'^\d{1,3}(.\d{3})*$'
79
+ if bool(re.fullmatch(r'[0-9,]+', s)):
80
+ res1 = True
81
+ if is_valid_thousand_separator(s, divide):
82
+ res2 = True
83
+ return res1, res2
84
+
85
+
86
+
87
+ def is_valid_thousand_separator_old(s, divide):
88
+ # check whether the string is in a valid thousands-separator format (e.g. 1,234,567)
89
+ if divide == ",":
90
+ pattern = r'^[-+]?\d{1,3}(,\d{3})*(\.(\d*))?$'
91
+ elif divide == ".":
92
+ pattern = r'^[-+]?\d{1,3}(.\d{3})*(\,(\d*))?$'
93
+ else:
94
+ return None
95
+ return bool(re.match(pattern, s))
96
+
97
+ def convert(x):
98
+ x_str = str(x)
99
+ if x_str.replace('.', '', 1).isdigit() or (x_str.startswith('-') and x_str[1:].replace('.', '', 1).isdigit()):
100
+ # print("convert",x)
101
+ return int(float(x)) if float(x).is_integer() else float(x)
102
+ # print("no need to convert",x)
103
+ return x
104
+
105
+ def contains_number(s):
106
+ for ch in s:
107
+ if is_standard_digit(ch):
108
+ return True
109
+ return False
110
+
111
+ def clean(x):
112
+ x = str(x)
113
+ x = x.replace(" ","")
114
+ x = x.replace("$","")
115
+ x = x.replace("\n","")
116
+ return convert(x)
117
+ def is_standard_digit(char):
118
+ return bool(re.match(r'^[0-9]$', char))
119
+ def get_unit(value):
120
+ _v = str(value)
121
+ n = len(_v)
122
+ R , L = n , 0
123
+ for i in range(n - 1, -1, -1):
124
+ if value[i].isalpha() or value[i] == '%':
125
+ R = i
126
+ break
127
+
128
+ # print('debugging',L , R + 1)
129
+ if R == n:
130
+ return "None"
131
+ for i in range(R, -1, -1):
132
+ if not value[i].isalpha() and value[i] != '%':
133
+ L = i + 1
134
+ break
135
+ if L > R:
136
+ return "None"
137
+ return value[L : R + 1]
138
+
139
+
140
+
141
+ def loose_is_digit(s):
142
+ for ch in s:
143
+ if is_standard_digit(ch) or ch == ',' or ch == '.' or ch == '+' or ch == '-':
144
+ continue
145
+ return False
146
+ return True
147
+
148
+ def get_numeric(value):
149
+ _v = str(value)
150
+ n = len(_v)
151
+ L , R = -1 , n
152
+
153
+ i = 0
154
+ while i < n:
155
+ if not is_standard_digit(value[i]) and value[i] != '+' and value[i] != '-':
156
+ i = i + 1
157
+ continue
158
+ j = i
159
+ while j < n and loose_is_digit(value[j]):
160
+ j = j + 1
161
+ L, R = i, j
162
+ # print(i, j, value[i:j])
163
+ i = j
164
+ if L == -1:
165
+ return 0
166
+ else:
167
+ return value[L : R]
168
+ def convert_to_number(value):
169
+ unit_part = get_unit(value)
170
+
171
+ value = str(value).replace("$","")
172
+ if is_number(value):
173
+ return float(value)
174
+ # extract the numeric part and the unit part
175
+ if not is_number(value[:-1]):
176
+ return value
177
+ number_part = float(value[:-1]) # drop the last character (the unit)
178
+ return number_part * unit_map.get(unit_part, 1) # scale by the unit factor (default 1 for unknown units)
179
+
180
+
181
+ def is_number(s):
182
+ try:
183
+ float(s) # try to convert the string to a float
184
+ return True
185
+ except ValueError:
186
+ return False
187
+
188
+ def is_valid_thousand_separator(s, divide):
189
+ # check whether the string is in a valid thousands-separator format (e.g. 1,234,567)
190
+
191
+ if divide == ",":
192
+ pattern = r'^[-+]?\d{1,3}(,\d{3})*(\.(\d*))?$'
193
+ elif divide == ".":
194
+ pattern = r'^[-+]?\d{1,3}(.\d{3})*(\,(\d*))?$'
195
+ else:
196
+ return False
197
+ return bool(re.match(pattern, s))
198
+
199
+ def Convert2Number(value):
200
+ # print(value)
201
+ if value[-1] == '.':
202
+ value = value[:-1]
203
+ f = 1
204
+ # print(value)
205
+ if (value[0] == '+') or (value[0] == '-'):
206
+ f = 1 if (value[0] == '+') else 0
207
+ value = value[1:]
208
+ sep, comma = ',', '.'
209
+ # print(value,is_valid_thousand_separator(value , ','),is_valid_thousand_separator(value , '.'))
210
+ if ((not is_valid_thousand_separator(value , ',')) and is_valid_thousand_separator(value, '.')):
211
+ sep, comma = '.' , ','
212
+ elif (not is_valid_thousand_separator(value , ',')) and (not is_valid_thousand_separator(value, '.')):# 2018,36.8
213
+ value = value.split(',')[-1]
214
+ # print("sep check",value , sep, comma,is_valid_thousand_separator(value , ','))
215
+ cmx = value.replace(sep,"")
216
+ cmx = cmx.replace(comma,".")
217
+ # print(f,value)
218
+ if is_number(cmx):
219
+ return float(cmx) if f else -float(cmx)
220
+ else:
221
+ return -1145141919810
222
+
223
+ # from quantulum3 import parser as PSP
224
+ def get_unit_and_numeric(_s):
225
+ s = str(_s)
226
+ lst = s.split(' ')
227
+ n = len(lst)
228
+ for i in range(n - 1 , -1 , -1):
229
+ if contains_number(lst[i]):
230
+ # print("cmx",lst[i])
231
+ Answer = lst[i]
232
+ Answer = Answer.replace(" ","")
233
+ Answer = Answer.replace("$","")
234
+ Answer = Answer.replace("\n","")
235
+ # print("zst",Answer)
236
+ number = get_numeric(Answer)
237
+ unit = get_unit(Answer)
238
+ if unit == 'None' and i + 1 < n:
239
+ unit = get_unit(lst[i + 1])
240
+ if unit not in unit_map:
241
+ unit = "None"
242
+ return number , unit
243
+ return "1145141919810" , "None"
244
+
245
+ def compare_numeric_value(_answer, _response, eps = 0.001):
246
+ response = _response.replace('\n',' ')
247
+ answer = _answer.replace(' ',' ')
248
+ ans_number, ans_unit = get_unit_and_numeric(answer)
249
+ response_number, response_unit = get_unit_and_numeric(response)
250
+
251
+
252
+ # print(response_number,response_unit)
253
+ # print(ans_number,ans_unit,type(ans_number))
254
+ ans_number = Convert2Number(ans_number)
255
+ response_number = Convert2Number(response_number)
256
+
257
+
258
+ for unit1 in [ans_unit, 'None']:
259
+ for unit2 in [response_unit, 'None']:
260
+ _ = ans_number * unit_map[unit1]
261
+ __ = response_number * unit_map[unit2]
262
+ # print(_ , __, abs((_ - __) / abs(_) ))
263
+ if abs((_ - __) / (0.01 + abs(_))) < eps:
264
+ return True
265
+ # for special_case in [100 , 1000 , 1000000,1000000000]: # special case for % and B->M->k
266
+ for special_case in [100, 1000, 1000000, 1000000000]:
267
+
268
+ if abs(special_case * ans_number - response_number) / (0.01 + abs(special_case * ans_number)) < eps:
269
+ return True
270
+ if abs(special_case * response_number - ans_number) / (0.01 + abs(ans_number)) < eps:
271
+ return True
272
+
273
+
274
+ return False
275
+
276
+ def compare_value(_answer, _response, eps = 0.001):
277
+ answer = str(_answer)
278
+ response = str(_response)
279
+
280
+ # print("{ debugging }",answer , "{ debugging }",response)
281
+ # if answer=="0.085":
282
+ # print('wxh')
283
+
284
+ if contains_number(str(answer)):
285
+ return compare_numeric_value(str(answer), str(response), eps = eps)
286
+ else:
287
+ return fuzzy_string_match(answer , response)
288
+
289
+ if __name__ == '__main__':
290
+ f = compare_value("14.200000000000001","14.2 µg/m³",0.05)
291
+ print(f)
eval/eval_value.py ADDED
@@ -0,0 +1,206 @@
1
+ import json
2
+ import re
3
+ import os
4
+ import shutil
5
+ from pathlib import Path
6
+ from compare_value import is_number, convert, convert_to_number, clean, is_valid_thousand_separator, only_digits_and_commas
7
+ from compare_value import compare_value
8
+ file_idx = 0
9
+
10
+
11
+ def compare(data_chart, data_image):
12
+ matching_question = []
13
+ qid = []
14
+ res = []
15
+ path = {}
16
+ path_score = {}
17
+ for question_id, chart_data in data_chart.items():
18
+ if "response" not in chart_data:
19
+ continue
20
+ # make sure the corresponding question_id is present in data_image
21
+ if question_id in data_image:
22
+ image_data = data_image[question_id]
23
+ if "response" not in image_data or "score" not in image_data:
24
+ continue
25
+ qa = {
26
+ "question_id": chart_data["question_id"],
27
+ "qid":chart_data["qid"],
28
+ "figue":data_qa[chart_data["question_id"]]["figure_path"],
29
+ "question":data_qa[chart_data["question_id"]]["question"],
30
+ "answer":chart_data["answer"],
31
+ "response":chart_data["response"]
32
+ }
33
+ if chart_data["score"] == 0:
34
+ matching_question.append(qa)
35
+ # check the score conditions
36
+ if chart_data["score"] == 0 and image_data["score"] == 1:
37
+ qid.append(chart_data["qid"])
38
+
39
+ if data_qa[chart_data["question_id"]]["figure_path"] not in path:
40
+ path[data_qa[chart_data["question_id"]]["figure_path"]] = 0
41
+ path[data_qa[chart_data["question_id"]]["figure_path"]] -= 1
42
+
43
+
44
+ if chart_data["score"] == 1 and image_data["score"] == 0:
45
+ if data_qa[chart_data["question_id"]]["figure_path"] not in path:
46
+ path[data_qa[chart_data["question_id"]]["figure_path"]] = 0
47
+ path[data_qa[chart_data["question_id"]]["figure_path"]] += 1
48
+
49
+
50
+ if data_qa[chart_data["question_id"]]["figure_path"] not in path_score:
51
+ path_score[data_qa[chart_data["question_id"]]["figure_path"]] = {"image": 0, "chart": 0}
52
+ path_score[data_qa[chart_data["question_id"]]["figure_path"]]["image"] += (image_data["score"] if not image_data["score"] == -1 else 0)
53
+ path_score[data_qa[chart_data["question_id"]]["figure_path"]]["chart"] += (chart_data["score"] if not chart_data["score"] == -1 else 0)
54
+
55
+ # output the results
56
+ # print("Matching question IDs:", matching_question)
57
+ # with open("compare_res.json", "w") as f:
58
+ # json.dump(matching_question, f, indent=4)
59
+ # print(qid)
60
+ path = dict(sorted(path.items(), key=lambda item: item[1]))
61
+ # print(path)
62
+
63
+ path_score = dict(sorted(path_score.items(), key=lambda item: item[1]["chart"]))
64
+ # print(path_score)
65
+ # for item in path_score:
66
+ # print(item,path_score[item])
67
+
68
+
69
+ easy_path_score = {k: v for k, v in path_score.items() if v['chart'] < v['image']}
70
+ easy_path_score = {k: path_score[k] for k in sorted(easy_path_score)}
71
+ even_path_score = {k: v for k, v in path_score.items() if v['chart'] == v['image']}
72
+ even_path_score = {k: path_score[k] for k in sorted(even_path_score)}
73
+ hard_path_score = {k: v for k, v in path_score.items() if v['chart'] > v['image']}
74
+ hard_path_score = {k: path_score[k] for k in sorted(hard_path_score)}
75
+
76
+ res = {}
77
+ # if the file exists, read the existing data
78
+
79
+ # file_path = "/data/tianchi/minzhi/chartQA/code/ana/bar/simple/gemini/value.json"
80
+ # if Path(file_path).exists():
81
+ # with open(file_path, "r") as f:
82
+ # existing_data = json.load(f)
83
+ # else:
84
+ # existing_data = {}
85
+
86
+ # # update the data (assuming path_score is a dict)
87
+ # print(len(existing_data))
88
+ # # print(existing_data)
89
+ # print("--------------------------------------")
90
+ # # print(path_score)
91
+ # existing_data.update(path_score) # or merge the data in some other way
92
+
93
+ # print(len(existing_data))
94
+ # # write the merged data back
95
+ # with open(file_path, "w") as f:
96
+ # json.dump(existing_data, f, indent=4)
97
+
98
+
99
+ # # print(path_score.keys())
100
+ # print("-----------------")
101
+
102
+ # global file_idx
103
+ # for item in easy_path_score:
104
+ # print(f"/data/tianchi/minzhi/chartQA/code/CharXiv/img/bar/single/archive{file_idx}/plain_chart/"+item+".png",easy_path_score[item])
105
+ # source_filepath = f"/data/tianchi/minzhi/chartQA/code/CharXiv/img/bar/single/archive{file_idx}/chart/h/"+item+".png"
106
+ # target_filepath = ""
107
+ # if os.path.exists(source_filepath):
108
+ # print(f" {source_filepath}")
109
+ # # shutil.copy(source_filepath, target_filepath)
110
+ # source_filepath = f"/data/tianchi/minzhi/chartQA/code/CharXiv/img/bar/single/archive{file_idx}/chart/v/"+item+".png"
111
+ # if os.path.exists(source_filepath):
112
+ # print(f" {source_filepath}")
113
+ # shutil.copy(source_filepath, target_filepath)
114
+
115
+
116
+
117
+ def eval_value(path):
118
+ print(path.split("/")[-1])
119
+ with open(path, 'r') as file:
120
+ # parse the JSON data
121
+ data = json.load(file)
122
+ count = {0:0, 1:0}
123
+ for idx in data:
124
+ item = data[idx]
125
+ if "response" not in item:
126
+ continue
127
+ # print(item["response"])
128
+ if compare_value(item["answer"], item["response"]):
129
+ data[idx]["score"] = 1
130
+ else:
131
+ data[idx]["score"] = 0
132
+ # if idx == "3":
133
+ # print(item["answer"], item["response"],data[idx]["score"])
134
+ # break
135
+ count[data[idx]["score"]] += 1
136
+
137
+ with open(path.replace(path.split('/')[-1],"score-"+path.split('/')[-1]), 'w') as f:
138
+ json.dump(data, f, indent=4)
139
+ print(count,count[1]/(count[0] + count[1]))
140
+ return count
141
+
142
+
143
+ def analysis(chartpath, imagepath):
144
+ print("")
145
+ eval_value(chartpath)
146
+ eval_value(imagepath)
147
+ chartpath = chartpath.replace(chartpath.split('/')[-1],"score-"+chartpath.split('/')[-1])
148
+ imagepath = imagepath.replace(imagepath.split('/')[-1],"score-"+imagepath.split('/')[-1])
149
+ with open(chartpath, 'r') as file:
150
+ data_chart = json.load(file)
151
+ with open(imagepath, 'r') as file:
152
+ data_image = json.load(file)
153
+ # compare(data_chart, data_image)
154
+ # compare(data_image, data_chart, target_root = "/data/tianchi/minzhi/chartQA/code/ana/bar/simple/value/hard")
155
+
156
+
157
+ file_idx = 3
158
+
159
+ # with open(f"/data/tianchi/CharXiv/data/line/line_QA_value.json", 'r') as file:
160
+ # # parse the JSON data
161
+ # data_qa = json.load(file)
162
+
163
+ # analysis("/data/tianchi/minzhi/chartQA/code/ana/bar/simple/gemini/record/bar-difference-internlm-xcomposer2-4khd-7b-chart.json",
164
+ # "/data/tianchi/minzhi/chartQA/code/ana/bar/simple/gemini/record/bar-difference-internlm-xcomposer2-4khd-7b-image.json")
165
+
166
+
167
+ # model = 'gemini-2.0-flash'
168
+ # analysis(f"/data/tianchi/CharXiv/results/{model}/{model}_line_value_plain.json",
169
+ # f"/data/tianchi/CharXiv/results/{model}/{model}_line_value_info.json")
170
+
171
+ # analysis(f"/data/tianchi/CharXiv/results/{model}/{model}_line_difference_plain.json",
172
+ # f"/data/tianchi/CharXiv/results/{model}/{model}_line_difference_info.json")
173
+
174
+ # file_list = ["/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/gemini-2.0-flash_label_difference_bar_QA.json",
175
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/new_gemini-2.0-flash_label_difference_bar_QA.json",
176
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/gemini-2.0-flash_visual_difference_bar_QA.json",
177
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/new_gemini-2.0-flash_visual_difference_bar_QA.json",
178
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/gemini-2.0-flash_label_value_bar_QA.json",
179
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/new_gemini-2.0-flash_label_value_bar_QA.json",
180
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/gemini-2.0-flash_visual_value_bar_QA.json",
181
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/new_gemini-2.0-flash_visual_value_bar_QA.json",
182
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/line_gemini-2.0-flash_label_difference_QA.json",
183
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/line_gemini-2.0-flash_visual_difference_QA.json",
184
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/line_gemini-2.0-flash_label_value_QA.json",
185
+ # "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/line_gemini-2.0-flash_visual_value_QA.json"
186
+ # ]
187
+ file_list = ["/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_flag_no-label_label_value_bar_QA.json",
188
+ "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_flag_origin_label_value_bar_QA.json",
189
+ "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_no-label_icon_value_bar_QA.json",
190
+ "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_no-label_label_value_bar_QA.json",
191
+ "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_origin_icon_value_bar_QA.json",
192
+ "/Users/linminzhi/Documents/chartQA/code/QA/our_bar/icon-label/record/icon-label/gemini-2.0-flash_origin_label_value_bar_QA.json"]
193
+ for file in file_list:
194
+ count1 = eval_value(file)
195
+ # count2 = eval_value(file.replace("gemini-2.0-flash","new_gemini-2.0-flash"))
196
+ # count = {0:count1[0]+count2[0],1:count1[1]+count2[1]}
197
+ # print(count,count[1]/(count[0] + count[1]))
198
+ print()
199
+
200
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/hard/gemini-2.0-flash_value_bar_info_QA.json")
201
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/hard/gemini-2.0-flash_value_bar_plain_QA.json")
202
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/radial/gemini-2.0-flash-exp_radial_difference_bar_QA_plain.json")
203
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/radial/gemini-2.0-flash-exp_radial_extreme_bar_QA_info.json")
204
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/radial/gemini-2.0-flash-exp_radial_extreme_bar_QA_plain.json")
205
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/radial/gemini-2.0-flash-exp_radial_value_bar_QA_info.json")
206
+ # eval_value("/Users/linminzhi/Documents/chartQA/code/QA/our_bar/radial/gemini-2.0-flash-exp_radial_value_bar_QA_plain.json")