utkarsh1302 committed
Commit d398f66 · verified · 1 Parent(s): 6b745a8

Update app.py

Files changed (1):
  1. app.py  +116 -200
app.py CHANGED
@@ -1,204 +1,120 @@
  import gradio as gr
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
- import pandas as pd
- from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import snapshot_download
-
- from src.about import (
-     CITATION_BUTTON_LABEL,
-     CITATION_BUTTON_TEXT,
-     EVALUATION_QUEUE_TEXT,
-     INTRODUCTION_TEXT,
-     LLM_BENCHMARKS_TEXT,
-     TITLE,
- )
- from src.display.css_html_js import custom_css
- from src.display.utils import (
-     BENCHMARK_COLS,
-     COLS,
-     EVAL_COLS,
-     EVAL_TYPES,
-     AutoEvalColumn,
-     ModelType,
-     fields,
-     WeightType,
-     Precision
- )
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
- from src.submission.submit import add_new_eval
-
-
- def restart_space():
-     API.restart_space(repo_id=REPO_ID)
-
- ### Space initialisation
- try:
-     print(EVAL_REQUESTS_PATH)
-     snapshot_download(
-         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-     )
- except Exception:
-     restart_space()
  try:
-     print(EVAL_RESULTS_PATH)
-     snapshot_download(
-         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-     )
- except Exception:
-     restart_space()
-
-
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
- (
-     finished_eval_queue_df,
-     running_eval_queue_df,
-     pending_eval_queue_df,
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
- def init_leaderboard(dataframe):
-     if dataframe is None or dataframe.empty:
-         raise ValueError("Leaderboard DataFrame is empty or None.")
-     return Leaderboard(
-         value=dataframe,
-         datatype=[c.type for c in fields(AutoEvalColumn)],
-         select_columns=SelectColumns(
-             default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-             label="Select Columns to Display:",
-         ),
-         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-         filter_columns=[
-             ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-             ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-             ColumnFilter(
-                 AutoEvalColumn.params.name,
-                 type="slider",
-                 min=0.01,
-                 max=150,
-                 label="Select the number of parameters (B)",
-             ),
-             ColumnFilter(
-                 AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-             ),
-         ],
-         bool_checkboxgroup_label="Hide models",
-         interactive=False,
-     )
-
-
- demo = gr.Blocks(css=custom_css)
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
-     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-             leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-             with gr.Column():
-                 with gr.Row():
-                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                 with gr.Column():
-                     with gr.Accordion(
-                         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             finished_eval_table = gr.components.Dataframe(
-                                 value=finished_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-                     with gr.Accordion(
-                         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             running_eval_table = gr.components.Dataframe(
-                                 value=running_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-
-                     with gr.Accordion(
-                         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             pending_eval_table = gr.components.Dataframe(
-                                 value=pending_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
              with gr.Row():
-                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
              with gr.Row():
-                 with gr.Column():
-                     model_name_textbox = gr.Textbox(label="Model name")
-                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                     model_type = gr.Dropdown(
-                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                         label="Model type",
-                         multiselect=False,
-                         value=None,
-                         interactive=True,
-                     )
-
-                 with gr.Column():
-                     precision = gr.Dropdown(
-                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                         label="Precision",
-                         multiselect=False,
-                         value="float16",
-                         interactive=True,
-                     )
-                     weight_type = gr.Dropdown(
-                         choices=[i.value.name for i in WeightType],
-                         label="Weights type",
-                         multiselect=False,
-                         value="Original",
-                         interactive=True,
-                     )
-                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-             submit_button = gr.Button("Submit Eval")
-             submission_result = gr.Markdown()
-             submit_button.click(
-                 add_new_eval,
-                 [
-                     model_name_textbox,
-                     base_model_name_textbox,
-                     revision_name_textbox,
-                     precision,
-                     weight_type,
-                     model_type,
-                 ],
-                 submission_result,
-             )
-
-     with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
-             citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 lines=20,
-                 elem_id="citation-button",
-                 show_copy_button=True,
-             )
-
- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=1800)
- scheduler.start()
- demo.queue(default_concurrency_limit=40).launch()

  import gradio as gr
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ from PIL import Image
+ import os
+
+ # --- Model Loading ---
+ # This section loads the model and tokenizer from Hugging Face.
+ # It's set to use bfloat16 for efficiency.
+ # Using a try-except block to handle potential errors during model loading.
  try:
+     model_id = "ByteDance-Seed/BAGEL-7B-MoT"
+     tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+     model = AutoModel.from_pretrained(
+         model_id,
+         torch_dtype=torch.bfloat16,
+         low_cpu_mem_usage=True,
+         trust_remote_code=True
+     ).cuda().eval()
+     print("Model loaded successfully.")
+ except Exception as e:
+     print(f"Error loading model: {e}")
+     # Display an error in the Gradio interface if the model fails to load
+     with gr.Blocks() as demo:
+         gr.Markdown("# 🚨 Error")
+         gr.Markdown(f"Failed to load the BAGEL-7B-MoT model. Please check the logs in the Hugging Face Space for more details. Error: {e}")
+     demo.launch()
+     # Exit if the model cannot be loaded
+     exit()
+
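# Editorial sketch (not part of this commit): the block above pins the model to CUDA via
# .cuda(), so it cannot run on CPU-only hardware. A minimal device-aware variant is shown
# below; it uses only standard torch/transformers APIs and has not been tested against
# BAGEL's trust_remote_code implementation.
def load_bagel(model_id: str = "ByteDance-Seed/BAGEL-7B-MoT"):
    """Load tokenizer and model in bfloat16 on GPU, falling back to float32 on CPU."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.bfloat16 if device == "cuda" else torch.float32
    tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    mdl = AutoModel.from_pretrained(
        model_id, torch_dtype=dtype, low_cpu_mem_usage=True, trust_remote_code=True
    ).to(device).eval()
    return tok, mdl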
+ # --- Core Functions for Each Task ---
+
+ def generate_image_from_text(text_prompt):
+     """Generates an image based on a text prompt."""
+     if not text_prompt:
+         return None, "Please provide a text prompt."
+     try:
+         inputs = tokenizer(text=text_prompt, return_tensors='pt')
+         inputs = {k: v.cuda() for k, v in inputs.items()}
+         # Generate the image
+         image = model.generate_image(**inputs)[0]
+         return image, "Image generated successfully."
+     except Exception as e:
+         return None, f"An error occurred: {e}"
+
+ def understand_image(image, question):
+     """Answers a question about an uploaded image."""
+     if image is None or not question:
+         return "Please upload an image and ask a question."
+     try:
+         # The model expects a list of PIL images
+         pil_image = Image.fromarray(image).convert('RGB')
+         inputs = tokenizer(text=question, images=[pil_image], return_tensors='pt')
+         inputs = {k: v.cuda() for k, v in inputs.items()}
+         # Generate the textual response
+         generated_ids = model.generate(**inputs)
+         response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+         return response
+     except Exception as e:
+         return f"An error occurred: {e}"
+
+ def edit_image(image, instruction):
+     """Edits an image based on a given instruction."""
+     if image is None or not instruction:
+         return None, "Please upload an image and provide an editing instruction."
+     try:
+         pil_image = Image.fromarray(image).convert('RGB')
+         # For image editing, the task needs to be specified
+         inputs = tokenizer(text=instruction, images=[pil_image], return_tensors='pt', task='image-editing')
+         inputs = {k: v.cuda() for k, v in inputs.items()}
+         # Generate the edited image
+         edited_image = model.generate_image(**inputs)[0]
+         return edited_image, "Image edited successfully."
+     except Exception as e:
+         return None, f"An error occurred: {e}"
+
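# Editorial sketch (not part of this commit): the three helpers above can be exercised
# without the UI, which is handy for debugging on the Space. SMOKE_TEST and sample.jpg
# are hypothetical names used only for this example.
if os.environ.get("SMOKE_TEST") == "1":
    import numpy as np
    sample = np.array(Image.open("sample.jpg").convert("RGB"))  # hypothetical local test image
    print(understand_image(sample, "What is in this image?"))
    edited, status = edit_image(sample, "Make the sky a vibrant pink.")
    print(status)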
+ # --- Gradio Interface ---
+ # We use Gradio Blocks to create a tabbed interface for the three functionalities.
+
+ with gr.Blocks(theme=gr.themes.Soft(), title="Multimodal BAGEL App") as demo:
+     gr.Markdown("# 🎨 Multimodal BAGEL App")
+     gr.Markdown("A prototype showcasing the capabilities of the `ByteDance-Seed/BAGEL-7B-MoT` model. Deployed on Hugging Face Spaces.")
+
+     with gr.Tabs():
+         # --- Text-to-Image Tab ---
+         with gr.TabItem("Text-to-Image Generation"):
              with gr.Row():
+                 with gr.Column(scale=1):
+                     t2i_prompt = gr.Textbox(lines=4, label="Prompt", placeholder="e.g., A photo of a bagel on a beach at sunset.")
+                     t2i_button = gr.Button("Generate Image", variant="primary")
+                 with gr.Column(scale=1):
+                     t2i_output_image = gr.Image(label="Generated Image", show_label=True)
+                     t2i_status = gr.Textbox(label="Status", interactive=False)
+             t2i_button.click(generate_image_from_text, inputs=[t2i_prompt], outputs=[t2i_output_image, t2i_status])
+
+         # --- Image Understanding Tab ---
+         with gr.TabItem("Image Understanding"):
              with gr.Row():
+                 with gr.Column(scale=1):
+                     iu_input_image = gr.Image(type="numpy", label="Upload Image")
+                     iu_question = gr.Textbox(label="Question", placeholder="e.g., What is in this image?")
+                     iu_button = gr.Button("Ask", variant="primary")
+                 with gr.Column(scale=1):
+                     iu_answer = gr.Textbox(label="Answer", lines=10, interactive=False)
+             iu_button.click(understand_image, inputs=[iu_input_image, iu_question], outputs=[iu_answer])
+
+         # --- Image Editing Tab ---
+         with gr.TabItem("Image Editing"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     ie_input_image = gr.Image(type="numpy", label="Upload Image to Edit")
+                     ie_instruction = gr.Textbox(label="Editing Instruction", placeholder="e.g., Make the sky a vibrant pink.")
+                     ie_button = gr.Button("Apply Edit", variant="primary")
+                 with gr.Column(scale=1):
+                     ie_output_image = gr.Image(label="Edited Image")
+                     ie_status = gr.Textbox(label="Status", interactive=False)
+             ie_button.click(edit_image, inputs=[ie_input_image, ie_instruction], outputs=[ie_output_image, ie_status])
+
+ # Launch the Gradio app
+ demo.launch()
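# Editorial note (not part of this commit): the removed leaderboard version launched with a
# request queue, demo.queue(default_concurrency_limit=40).launch(). On a single shared GPU,
# a similar pattern could replace the bare launch above; the limit below is an assumed value,
# not one taken from this commit:
#
#     demo.queue(default_concurrency_limit=2).launch()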