ahmedab98 committed
Commit b75a70f · verified · 1 Parent(s): 98b4261

Create app.py

Files changed (1):
  1. app.py +70 -321
app.py CHANGED
@@ -1,333 +1,82 @@
import os
import json
- import datetime
import requests
- from email.utils import parseaddr
-
- import gradio as gr
- import pandas as pd
- import numpy as np
-
- from datasets import load_dataset, VerificationMode
- from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import HfApi
-
- # InfoStrings
- from scorer import question_scorer
- from content import format_error, format_warning, format_log, TITLE, INTRODUCTION_TEXT, SUBMISSION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, model_hyperlink
-
- TOKEN = os.environ.get("TOKEN", None)
-
- OWNER="gaia-benchmark"
- DATA_DATASET = f"{OWNER}/GAIA"
- INTERNAL_DATA_DATASET = f"{OWNER}/GAIA_internal"
- SUBMISSION_DATASET = f"{OWNER}/submissions_internal"
- SUBMISSION_DATASET_PUBLIC = f"{OWNER}/submissions_public"
- CONTACT_DATASET = f"{OWNER}/contact_info"
- RESULTS_DATASET = f"{OWNER}/results_public"
- LEADERBOARD_PATH = f"{OWNER}/leaderboard"
- api = HfApi()
-
- YEAR_VERSION = "2023"
- ref_scores_len = {"validation": 165, "test": 301}
- ref_level_len = {"validation": {1: 53, 2: 86, 3: 26}, "test": {1: 93, 2: 159, 3: 49}}
-
- os.makedirs("scored", exist_ok=True)
-
- # Should be False on spaces and True outside
- LOCAL_DEBUG = False #os.environ.get("system", "") != "spaces"
-
- # Display the results
- eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
- contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
- def get_dataframe_from_results(eval_results, split):
-     local_df = eval_results[split]
-     local_df = local_df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
-     local_df = local_df.remove_columns(["system_prompt", "url"])
-     local_df = local_df.rename_column("model", "Agent name")
-     local_df = local_df.rename_column("model_family", "Model family")
-     local_df = local_df.rename_column("score", "Average score (%)")
-     for i in [1, 2, 3]:
-         local_df = local_df.rename_column(f"score_level{i}", f"Level {i} score (%)")
-     local_df = local_df.rename_column("date", "Submission date")
-     df = pd.DataFrame(local_df)
-     df = df.sort_values(by=["Average score (%)"], ascending=False)
-
-     numeric_cols = [c for c in local_df.column_names if "score" in c]
-     df[numeric_cols] = df[numeric_cols].multiply(100).round(decimals=2)
-     #df = df.style.format("{:.2%}", subset=numeric_cols)
-
-     return df
-
- eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
- eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
-
- # Gold answers
- gold_results = {}
- gold_dataset = load_dataset(INTERNAL_DATA_DATASET, f"{YEAR_VERSION}_all", token=TOKEN, trust_remote_code=True)
- gold_results = {split: {row["task_id"]: row for row in gold_dataset[split]} for split in ["test", "validation"]}
-
-
- def restart_space():
-     api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
-
- TYPES = ["markdown", "number", "number", "number", "number", "str", "str", "str"]
-
- def add_new_eval(
-     val_or_test: str,
-     model: str,
-     model_family: str,
-     system_prompt: str,
-     url: str,
-     path_to_file: str,
-     organisation: str,
-     mail: str,
-     profile: gr.OAuthProfile,
- ):
-     try:
-         # Was the profile created less than 2 months ago?
-         user_data = requests.get(f"https://huggingface.co/api/users/{profile.username}/overview")
-         creation_date = json.loads(user_data.content)["createdAt"]
-         if datetime.datetime.now() - datetime.datetime.strptime(creation_date, '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.timedelta(days=60):
-             return format_error("This account is not authorized to submit on GAIA.")
-
-
-         contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-         user_submission_dates = sorted(row["date"] for row in contact_infos[val_or_test] if row["username"] == profile.username)
-         if len(user_submission_dates) > 0 and user_submission_dates[-1] == datetime.datetime.today().strftime('%Y-%m-%d'):
-             return format_error("You already submitted once today, please try again tomorrow.")
-
-
-         is_validation = val_or_test == "validation"
-         # Very basic email parsing
-         _, parsed_mail = parseaddr(mail)
-         if not "@" in parsed_mail:
-             return format_warning("Please provide a valid email address.")
-
-         print("Adding new eval")
-
-         # Check if the combination model/org already exists and print a warning message if yes
-         if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organisation.lower() in set([o.lower() for o in eval_results[val_or_test]["organisation"]]):
-             return format_warning("This model has already been submitted.")
-
-         if path_to_file is None:
-             return format_warning("Please attach a file.")
-
-         # SAVE UNSCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=path_to_file.name,
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_raw_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE CONTACT
-         contact_info = {
-             "model": model,
-             "model_family": model_family,
-             "url": url,
-             "organisation": organisation,
-             "username": profile.username,
-             "mail": mail,
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         contact_infos[val_or_test] = contact_infos[val_or_test].add_item(contact_info)
-         if LOCAL_DEBUG:
-             print("mock uploaded contact info")
-         else:
-             contact_infos.push_to_hub(CONTACT_DATASET, config_name=YEAR_VERSION, token=TOKEN)
-
-         # SCORE SUBMISSION
-         file_path = path_to_file.name
-         scores = {"all": 0, 1: 0, 2: 0, 3: 0}
-         num_questions = {"all": 0, 1: 0, 2: 0, 3: 0}
-         task_ids = []
-         with open(f"scored/{organisation}_{model}.jsonl", "w") as scored_file:
-             with open(file_path, 'r') as f:
-                 for ix, line in enumerate(f):
-                     try:
-                         task = json.loads(line)
-                     except Exception:
-                         return format_error(f"Line {ix} is incorrectly formatted. Please fix it and resubmit your file.")
-
-                     if "model_answer" not in task:
-                         return format_error(f"Line {ix} contains no model_answer key. Please fix it and resubmit your file.")
-                     answer = task["model_answer"]
-                     task_id = task["task_id"]
-                     try:
-                         level = int(gold_results[val_or_test][task_id]["Level"])
-                     except KeyError:
-                         return format_error(f"{task_id} not found in split {val_or_test}. Are you sure you submitted the correct file?")
-
-                     score = question_scorer(task['model_answer'], gold_results[val_or_test][task_id]["Final answer"])
-
-                     scored_file.write(
-                         json.dumps({
-                             "id": task_id,
-                             "model_answer": answer,
-                             "score": score,
-                             "level": level
-                         }) + "\n"
-                     )
-                     task_ids.append(task_id)
-
-                     scores["all"] += score
-                     scores[level] += score
-                     num_questions["all"] += 1
-                     num_questions[level] += 1
-
-         # Check if there's any duplicate in the submission
-         if len(task_ids) != len(set(task_ids)):
-             return format_error("There are duplicates in your submission. Please check your file and resubmit it.")
-
-         if any([num_questions[level] != ref_level_len[val_or_test][level] for level in [1, 2, 3]]):
-             return format_error(f"Your submission has {num_questions[1]} questions for level 1, {num_questions[2]} for level 2, and {num_questions[3]} for level 3, but it should have {ref_level_len[val_or_test][1]}, {ref_level_len[val_or_test][2]}, and {ref_level_len[val_or_test][3]} respectively. Please check your submission.")
-
-         # SAVE SCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded scored submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
            )

-         # Save scored file
-         if is_validation:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET_PUBLIC,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE TO LEADERBOARD DATA
-         eval_entry = {
-             "model": model,
-             "model_family": model_family,
-             "system_prompt": system_prompt,
-             "url": url,
-             "organisation": organisation,
-             "score": scores["all"]/ref_scores_len[val_or_test],
-             "score_level1": scores[1]/num_questions[1],
-             "score_level2": scores[2]/num_questions[2],
-             "score_level3": scores[3]/num_questions[3],
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         if num_questions[1] + num_questions[2] + num_questions[3] != ref_scores_len[val_or_test]:
-             return format_error(f"Your submission has {len(scores['all'])} questions for the {val_or_test} set, but it should have {ref_scores_len[val_or_test]}. Please check your submission.")
-         # Catching spam submissions of 100%
-         if all((eval_entry[k] == 1 for k in ["score_level1", "score_level2", "score_level3"])):
-             return format_error(f"There was a problem with your submission. Please open a discussion.")
-
-         # Testing for duplicates - to see if we want to add something like it as it would allow people to try to see the content of other submissions
-         #eval_entry_no_date = {k: v for k, v in eval_entry if k != "date"}
-         #columns_no_date = [c for c in eval_results[val_or_test].column_names if c != "date"]
-         #if eval_entry_no_date in eval_results[val_or_test].select_columns(columns_no_date):
-         #    return format_error(f"Your submission is an exact duplicate from an existing submission.")
-
-         eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
-         print(eval_results)
-         if LOCAL_DEBUG:
-             print("mock uploaded results to lb")
-         else:
-             eval_results.push_to_hub(RESULTS_DATASET, config_name=YEAR_VERSION, token=TOKEN)
-
-
-         return format_log(f"Model {model} submitted by {organisation} successfully.\nPlease wait a few hours and refresh the leaderboard to see your score displayed.")
-     except Exception as e:
-         print(e)
-         return format_error(f"An error occurred, please open a discussion and indicate at what time you encountered the error.\n")
-
-
- def refresh():
-     eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-     eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
-     eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
-     return eval_dataframe_val, eval_dataframe_test
-
- def upload_file(files):
-     file_paths = [file.name for file in files]
-     return file_paths
-
-
- demo = gr.Blocks()
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
-             citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 elem_id="citation-button",
-             ) #.style(show_copy_button=True)

-     with gr.Tab("Results: Test"):
-         leaderboard_table_test = gr.components.Dataframe(
-             value=eval_dataframe_test, datatype=TYPES, interactive=False,
-             column_widths=["20%"]
-         )
-     with gr.Tab("Results: Validation"):
-         leaderboard_table_val = gr.components.Dataframe(
-             value=eval_dataframe_val, datatype=TYPES, interactive=False,
-             column_widths=["20%"]
-         )

-     refresh_button = gr.Button("Refresh")
-     refresh_button.click(
-         refresh,
-         inputs=[],
-         outputs=[
-             leaderboard_table_val,
-             leaderboard_table_test,
-         ],
-     )
-     with gr.Accordion("Submit a new model for evaluation"):
-         with gr.Row():
-             gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
-         with gr.Row():
-             with gr.Column():
-                 level_of_test = gr.Radio(["validation", "test"], value="validation", label="Split")
-                 model_name_textbox = gr.Textbox(label="Agent name")
-                 model_family_textbox = gr.Textbox(label="Model family")
-                 system_prompt_textbox = gr.Textbox(label="System prompt example")
-                 url_textbox = gr.Textbox(label="Url to model information")
-             with gr.Column():
-                 organisation = gr.Textbox(label="Organisation")
-                 mail = gr.Textbox(label="Contact email (will be stored privately, & used if there is an issue with your submission)")
-                 file_output = gr.File()

-         with gr.Row():
-             gr.LoginButton()
-             submit_button = gr.Button("Submit Eval")
-         submission_result = gr.Markdown()
-         submit_button.click(
-             add_new_eval,
-             [
-                 level_of_test,
-                 model_name_textbox,
-                 model_family_textbox,
-                 system_prompt_textbox,
-                 url_textbox,
-                 file_output,
-                 organisation,
-                 mail
-             ],
-             submission_result,
-         )

- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=3600)
- scheduler.start()
- demo.launch(debug=True)
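
Note on the removed code above: add_new_eval() expects the uploaded file to be JSONL with one object per line containing "task_id" and "model_answer", and it rewrites each line to scored/<organisation>_<model>.jsonl with the computed score and level. A minimal sketch of both record shapes, assuming made-up placeholder IDs (not real GAIA task IDs):

import json

# Hypothetical records for illustration only; "task-0001" and the answer are placeholders.
submission_line = {"task_id": "task-0001", "model_answer": "42"}                 # shape read by add_new_eval()
scored_line = {"id": "task-0001", "model_answer": "42", "score": 1, "level": 2}  # shape written to scored/<org>_<model>.jsonl

# A submission file is simply one such object per line (the filename here is arbitrary).
with open("my_submission.jsonl", "w") as f:
    f.write(json.dumps(submission_line) + "\n")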
 
import os
+ import gradio as gr
import json
import requests
+ from openai import OpenAI
+
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ MODEL = "gpt-4o"
+
+ # Load the questions
+ def load_questions():
+     with open("questions.json", "r", encoding="utf-8") as f:
+         return json.load(f)
+
+ # Generate the answers
+ def generate_answers():
+     questions = load_questions()
+     answers = []
+
+     for q in questions:
+         prompt = f"Réponds précisément à cette question : {q['question']}"
+         try:
+             response = client.chat.completions.create(
+                 model=MODEL,
+                 messages=[{"role": "user", "content": prompt}]
            )
+             answer = response.choices[0].message.content.strip()
+         except Exception as e:
+             answer = f"Erreur: {str(e)}"
+         answers.append({"task_id": q["task_id"], "question": q["question"], "answer": answer})
+     return answers
+
+ # Human-readable display
+ def format_display(answers):
+     return "\n\n".join([f"🟨 Q: {a['question']}\n🟩 A: {a['answer']}" for a in answers])
+
+ # Submission to GAIA
+ def submit_to_gaia(username, agent_url, answers):
+     payload = {
+         "username": username,
+         "agent_link": agent_url,
+         "answers": [{"task_id": a["task_id"], "answer": a["answer"]} for a in answers]
+     }
+     response = requests.post("https://gaia-benchmark.vercel.app/submit", json=payload)
+     if response.status_code == 200:
+         return f"✅ Soumission réussie ! Score : {response.json().get('score', '?')}"
+     else:
+         return f"❌ Erreur de soumission : {response.text}"
+
+ # Session state
+ answers_state = []
+
+ # Gradio Blocks UI
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🧠 GAIA Final Agent\nRéponds à 20 questions et soumets-les automatiquement à GAIA")
+     with gr.Row():
+         username_input = gr.Text(label="👤 Nom d'utilisateur Hugging Face")
+         agent_url_input = gr.Text(label="🔗 URL du Space Hugging Face")

+     output_display = gr.Textbox(label="📄 Résultats (réponses générées)", lines=20)

    with gr.Row():
+         gen_btn = gr.Button("🧠 Générer les réponses")
+         submit_btn = gr.Button("📤 Soumettre à GAIA")

+     result_display = gr.Textbox(label="Statut de soumission", lines=2)

+     def handle_generate():
+         global answers_state
+         answers_state = generate_answers()
+         return format_display(answers_state)

+     def handle_submit(username, agent_url):
+         if not answers_state:
+             return "❌ Veuillez d'abord générer les réponses."
+         return submit_to_gaia(username, agent_url, answers_state)

+     gen_btn.click(fn=handle_generate, outputs=output_display)
+     submit_btn.click(fn=handle_submit, inputs=[username_input, agent_url_input], outputs=result_display)

+ if __name__ == "__main__":
+     demo.launch()
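
The new app.py assumes a questions.json file next to it and an OPENAI_API_KEY secret readable via os.getenv. Judging from how generate_answers() and submit_to_gaia() index the records, a minimal file would look like the sketch below; the task IDs and questions are placeholders, not actual GAIA tasks:

import json

# Placeholder questions; real GAIA task_ids and questions are not part of this commit.
sample_questions = [
    {"task_id": "task-0001", "question": "What is the capital of France?"},
    {"task_id": "task-0002", "question": "How many moons does Mars have?"},
]

# Write the file that load_questions() opens at startup.
with open("questions.json", "w", encoding="utf-8") as f:
    json.dump(sample_questions, f, ensure_ascii=False, indent=2)

submit_to_gaia() then posts a payload of the form {"username": ..., "agent_link": ..., "answers": [{"task_id": ..., "answer": ...}, ...]} to the hard-coded https://gaia-benchmark.vercel.app/submit endpoint, so the Space only needs the username and Space URL typed into the two text boxes before clicking the submit button.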