plcedoz38 committed on
Commit
87d57d6
·
1 Parent(s): c1eb1f9
Files changed (3)
  1. app.py +217 -4
  2. navigation.py +192 -0
  3. requirements.txt +2 -0
app.py CHANGED
@@ -1,7 +1,220 @@
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+import subprocess
+
+subprocess.run(
+    "pip install flash-attn --no-build-isolation", env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"}, shell=True
+)
+
+from typing import Any, List
+
 import gradio as gr
+import requests
+import spaces
+import torch
+from PIL import Image, ImageDraw
+from transformers import AutoModelForImageTextToText, AutoProcessor
+from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
+
+import navigation  # app.py runs as a top-level script on Spaces, so a relative import would fail
+
+# --- Configuration ---
+MODEL_ID = "Hcompany/Holo1-7B"
+
+# --- Model and Processor Loading (Load once) ---
+print(f"Loading model and processor for {MODEL_ID}...")
+model = None
+processor = None
+model_loaded = False
+load_error_message = ""
+
+try:
+    model = AutoModelForImageTextToText.from_pretrained(
+        MODEL_ID, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", trust_remote_code=True
+    ).to("cuda")
+    processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
+    model_loaded = True
+    print("Model and processor loaded successfully.")
+except Exception as e:
+    load_error_message = (
+        f"Error loading model/processor: {e}\n"
+        "This might be due to network issues, an incorrect model ID, or missing dependencies (such as flash_attention_2, if it is enabled in the model config).\n"
+        "Ensure you have a stable internet connection and the necessary libraries installed."
+    )
+    print(load_error_message)
+
+# --- Helper functions from the model card (or adapted) ---
+
+
+@spaces.GPU(duration=120)
+def run_inference_localization(
+    messages_for_template: List[dict[str, Any]], pil_image_for_processing: Image.Image
+) -> str:
+    """
+    Runs inference using the Holo1 model.
+    - messages_for_template: The prompt structure, potentially including the PIL image object
+      (which apply_chat_template converts to an image tag).
+    - pil_image_for_processing: The actual PIL image to be processed into tensors.
+    """
+    model.to("cuda")
+    torch.cuda.set_device(0)
+    # 1. Apply chat template to messages. This creates the text part of the prompt,
+    #    including image tags if the image was part of `messages_for_template`.
+    text_prompt = processor.apply_chat_template(messages_for_template, tokenize=False, add_generation_prompt=True)
+
+    # 2. Process text and image together to get model inputs
+    inputs = processor(
+        text=[text_prompt],
+        images=[pil_image_for_processing],  # Provide the actual image data here
+        padding=True,
+        return_tensors="pt",
+    )
+    inputs = inputs.to(model.device)
+
+    # 3. Generate response
+    #    Using do_sample=False for more deterministic output, as in the model card's structured output example
+    generated_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)
+
+    # 4. Trim input_ids from generated_ids to keep only the newly generated tokens
+    generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
+
+    # 5. Decode the generated tokens
+    decoded_output = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+
+    return decoded_output[0] if decoded_output else ""
+
+
+# --- Gradio processing function ---
+def navigate(input_pil_image: Image.Image, task: str) -> tuple[str, Image.Image | None]:
+    if not model_loaded or not processor or not model:
+        return f"Model not loaded. Error: {load_error_message}", None
+    if not input_pil_image:
+        return "No image provided. Please upload an image.", None
+    if not task or task.strip() == "":
+        return "No task provided. Please type a task.", input_pil_image.copy().convert("RGB")
+
+    # 1. Prepare image: resize according to the model's image processor's expected properties.
+    #    This ensures predicted coordinates match the (resized) image dimensions.
+    image_proc_config = processor.image_processor
+    try:
+        resized_height, resized_width = smart_resize(
+            input_pil_image.height,
+            input_pil_image.width,
+            factor=image_proc_config.patch_size * image_proc_config.merge_size,
+            min_pixels=image_proc_config.min_pixels,
+            max_pixels=image_proc_config.max_pixels,
+        )
+        # Using LANCZOS for resampling, as it's generally good for downscaling.
+        # The model card used `resample=None`, which may imply nearest or the default;
+        # for visual quality in the demo, LANCZOS is reasonable.
+        resized_image = input_pil_image.resize(
+            size=(resized_width, resized_height),
+            resample=Image.Resampling.LANCZOS,  # type: ignore
+        )
+    except Exception as e:
+        print(f"Error resizing image: {e}")
+        return f"Error resizing image: {e}", input_pil_image.copy().convert("RGB")
+
+    # 2. Create the prompt using the resized image (for correct image tagging context) and the task
+    prompt = navigation.get_navigation_prompt(task, resized_image, step=1)
+
+    # 3. Run inference
+    #    Pass `prompt` (which includes the image object for template processing)
+    #    and `resized_image` (for the actual tensor conversion).
+    try:
+        navigation_str = run_inference_localization(prompt, resized_image)
+    except Exception as e:
+        print(f"Error during model inference: {e}")
+        return f"Error during model inference: {e}", resized_image.copy().convert("RGB")
+
+    # The click handler and the example wire two outputs (text + image), so return both.
+    # navigation_str could also be parsed into navigation.NavigationStep(**json.loads(navigation_str)).
+    return navigation_str, resized_image.copy().convert("RGB")
+
+
+# --- Load Example Data ---
+example_image = None
+example_task = "Book a hotel in Paris on August 3rd for 3 nights"
+try:
+    example_image_url = "https://huggingface.co/Hcompany/Holo1-7B/resolve/main/calendar_example.jpg"
+    example_image = Image.open(requests.get(example_image_url, stream=True).raw)
+except Exception as e:
+    print(f"Could not load example image from URL: {e}")
+    # Create a placeholder image if loading fails, so the Gradio example still works
+    try:
+        example_image = Image.new("RGB", (200, 150), color="lightgray")
+        draw = ImageDraw.Draw(example_image)
+        draw.text((10, 10), "Example image\nfailed to load", fill="black")
+    except Exception:  # If PIL itself is an issue (unlikely here, but good for robustness)
+        pass
+
+
+# --- Gradio Interface Definition ---
+title = "Holo1-7B: Action VLM Navigation Demo"
+description = """
+This demo showcases **Holo1-7B**, an Action Vision-Language Model developed by HCompany, fine-tuned from Qwen/Qwen2.5-VL-7B-Instruct.
+It's designed to interact with web interfaces like a human user. Here, we demonstrate its web navigation capability.
+
+**How to use:**
+1. Upload an image (e.g., a screenshot of a UI, like the calendar example).
+2. Provide a textual task (e.g., "Book a hotel in Paris on August 3rd for 3 nights").
+3. The model will predict the navigation step.
+
+The model processes a resized version of your input image. Coordinates are relative to this resized image.
+"""
+article = f"""
+<p style='text-align: center'>
+Model: <a href='https://huggingface.co/{MODEL_ID}' target='_blank'>{MODEL_ID}</a> by HCompany |
+Paper: <a href='https://cdn.prod.website-files.com/67e2dbd9acff0c50d4c8a80c/683ec8095b353e8b38317f80_h_tech_report_v1.pdf' target='_blank'>HCompany Tech Report</a> |
+Blog: <a href='https://www.hcompany.ai/surfer-h' target='_blank'>Surfer-H Blog Post</a>
+</p>
+"""
+
+if not model_loaded:
+    with gr.Blocks() as demo:
+        gr.Markdown("# <center>⚠️ Error: Model Failed to Load ⚠️</center>")
+        gr.Markdown(f"<center>{load_error_message}</center>")
+        gr.Markdown(
+            "<center>Please check the console output for more details. Reloading the Space might help if it's a temporary issue.</center>"
+        )
+else:
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown(f"<h1 style='text-align: center;'>{title}</h1>")
+        # gr.Markdown(description)
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                input_image_component = gr.Image(type="pil", label="Input UI Image", height=400)
+                task_component = gr.Textbox(
+                    label="Task",
+                    placeholder="e.g., Click the 'Login' button",
+                    info="Type the action you want the model to localize on the image.",
+                )
+                submit_button = gr.Button("Localize Click", variant="primary")
+
+            with gr.Column(scale=1):
+                output_coords_component = gr.Textbox(
+                    label="Predicted Coordinates (Format: Click(x,y))", interactive=False
+                )
+                output_image_component = gr.Image(
+                    type="pil", label="Image with Predicted Click Point", height=400, interactive=False
+                )
+
+        if example_image:
+            gr.Examples(
+                examples=[[example_image, example_task]],
+                inputs=[input_image_component, task_component],
+                outputs=[output_coords_component, output_image_component],
+                fn=navigate,
+                cache_examples="lazy",
+            )
+
+        gr.Markdown(article)
 
+        submit_button.click(
+            fn=navigate,
+            inputs=[input_image_component, task_component],
+            outputs=[output_coords_component, output_image_component],
+        )
 
+if __name__ == "__main__":
+    demo.launch(debug=True)
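
The commented-out return line (`# return navigation.NavigationStep(**json.loads(navigation_str))`) suggests the raw model output is meant to be parsed into the schema defined in navigation.py. Below is a minimal, illustrative sketch of that step, assuming the model emits the JSON either bare or wrapped in a Markdown code fence; the helper `parse_navigation_step` and the sample payload are hypothetical, not part of this commit.

```python
# Illustrative only: parse the raw navigation string into the schema from navigation.py.
import json
import re

from navigation import NavigationStep


def parse_navigation_step(navigation_str: str) -> NavigationStep:
    """Hypothetical helper (not in this commit): strip an optional code fence, then validate."""
    match = re.search(r"`{3}(?:json)?\s*(.*?)\s*`{3}", navigation_str, re.DOTALL)
    payload = match.group(1) if match else navigation_str.strip()
    return NavigationStep(**json.loads(payload))


# Hand-written sample payload, not real model output:
sample = (
    '{"note": "", "thought": "Open the date picker to select August 3rd.", '
    '"action": {"action": "click_element", "element": "check-in date field", "x": 412, "y": 237}}'
)
step = parse_navigation_step(sample)
print(step.action.log())  # I have clicked on the element 'check-in date field' at absolute coordinates 412, 237
```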
navigation.py ADDED
@@ -0,0 +1,192 @@
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+SYSTEM_PROMPT: str = """Imagine you are a robot browsing the web, just like humans. Now you need to complete a task.
+In each iteration, you will receive an Observation that includes the last screenshots of a web browser and the current memory of the agent.
+You also have information about the step that the agent is trying to achieve to solve the task.
+Carefully analyze the visual information to identify what to do, then follow the guidelines to choose the next action.
+You should detail your thought (i.e. reasoning steps) before taking the action.
+Also detail in the notes field of the action the extracted information relevant to solving the task.
+Once you have enough information in the notes to answer the task, return an answer action with the detailed answer in the notes field.
+This will be evaluated by an evaluator and should match all the criteria or requirements of the task.
+
+Guidelines:
+- Store in the notes all the relevant information to solve the task that fulfills the task criteria. Be precise
+- Use both the task and the step information to decide what to do
+- If you want to write in a text field and the text field already has text, designate the text field by the text it contains and its type
+- If there is a cookies notice, always accept all the cookies first
+- The observation is the screenshot of the current page and the memory of the agent.
+- If you see relevant information on the screenshot to answer the task, add it to the notes field of the action.
+- If there is no relevant information on the screenshot to answer the task, add an empty string to the notes field of the action.
+- If you see buttons that allow you to navigate directly to relevant information, like jump to ... or go to ... , use them to navigate faster.
+- In the answer action, give as many details as possible relevant to answering the task.
+- If you want to write, don't click before. Directly use the write action
+- To write, identify the web element by its type and the text it already contains
+- If you want to use a search bar, directly write text in the search bar
+- Don't scroll too much. Don't scroll if the number of scrolls is greater than 3
+- Don't scroll if you are at the end of the webpage
+- Only refresh if you identify a rate limit problem
+- If you are looking for a single flight, click on round-trip to select 'one way'
+- Never try to login, enter email or password. If there is a need to login, then go back.
+- If you are facing a captcha on a website, try to solve it.
+
+- If you have enough information in the screenshot and in the notes to answer the task, return an answer action with the detailed answer in the notes field
+- The current date is {timestamp}.
+
+# <output_json_format>
+# ```json
+# {output_format}
+# ```
+# </output_json_format>
+
+"""
+
+
+class ClickElementAction(BaseModel):
+    """Click at absolute coordinates of a web element with its description"""
+
+    action: Literal["click_element"] = Field(description="Click at absolute coordinates of a web element")
+    element: str = Field(description="text description of the element")
+    x: int = Field(description="The x coordinate, number of pixels from the left edge.")
+    y: int = Field(description="The y coordinate, number of pixels from the top edge.")
+
+    def log(self):
+        return f"I have clicked on the element '{self.element}' at absolute coordinates {self.x}, {self.y}"
+
+
+class WriteElementAction(BaseModel):
+    """Write content at absolute coordinates of a web element identified by its description, then press Enter."""
+
+    action: Literal["write_element_abs"] = Field(description="Write content at absolute coordinates of a web page")
+    content: str = Field(description="Content to write")
+    element: str = Field(description="Text description of the element")
+    x: int = Field(description="The x coordinate, number of pixels from the left edge.")
+    y: int = Field(description="The y coordinate, number of pixels from the top edge.")
+
+    def log(self):
+        return f"I have written '{self.content}' in the element '{self.element}' at absolute coordinates {self.x}, {self.y}"
+
+
+class ScrollAction(BaseModel):
+    """Scroll action with no required element"""
+
+    action: Literal["scroll"] = Field(description="Scroll the page or a specific element")
+    direction: Literal["down", "up", "left", "right"] = Field(description="The direction to scroll in")
+
+    def log(self):
+        return f"I have scrolled {self.direction}"
+
+
+class GoBackAction(BaseModel):
+    """Action to navigate back in browser history"""
+
+    action: Literal["go_back"] = Field(description="Navigate to the previous page")
+
+    def log(self):
+        return "I have gone back to the previous page"
+
+
+class RefreshAction(BaseModel):
+    """Action to refresh the current page"""
+
+    action: Literal["refresh"] = Field(description="Refresh the current page")
+
+    def log(self):
+        return "I have refreshed the page"
+
+
+class GotoAction(BaseModel):
+    """Action to go to a particular URL"""
+
+    action: Literal["goto"] = Field(description="Goto a particular URL")
+    url: str = Field(description="A url starting with http:// or https://")
+
+    def log(self):
+        return f"I have navigated to the URL {self.url}"
+
+
+class WaitAction(BaseModel):
+    """Action to wait for a particular amount of time"""
+
+    action: Literal["wait"] = Field(description="Wait for a particular amount of time")
+    seconds: int = Field(default=2, ge=0, le=10, description="The number of seconds to wait")
+
+    def log(self):
+        return f"I have waited for {self.seconds} seconds"
+
+
+class RestartAction(BaseModel):
+    """Restart the task from the beginning."""
+
+    action: Literal["restart"] = "restart"
+
+    def log(self):
+        return "I have restarted the task from the beginning"
+
+
+class AnswerAction(BaseModel):
+    """Return a final answer to the task. This is the last action to call in an episode."""
+
+    action: Literal["answer"] = "answer"
+    content: str = Field(description="The answer content")
+
+    def log(self):
+        return f"I have answered the task with '{self.content}'"
+
+
+ActionSpace = (
+    ClickElementAction
+    | WriteElementAction
+    | ScrollAction
+    | GoBackAction
+    | RefreshAction
+    | WaitAction
+    | RestartAction
+    | AnswerAction
+    | GotoAction
+)
+
+
+class NavigationStep(BaseModel):
+    note: str = Field(
+        default="",
+        description="Task-relevant information extracted from the previous observation. Keep empty if no new info.",
+    )
+    thought: str = Field(description="Reasoning about next steps (<4 lines)")
+    action: ActionSpace = Field(description="Next action to take")
+
+
+def get_navigation_prompt(task, image, step=1):
+    """
+    Get the prompt for the navigation task.
+    - task: The task to complete
+    - image: The current screenshot of the web page
+    - step: The current step of the task
+    """
+    system_prompt = SYSTEM_PROMPT.format(
+        output_format=NavigationStep.model_json_schema(),
+        timestamp="2025-06-04 14:16:03",
+    )
+    return [
+        {
+            "role": "system",
+            "content": [
+                {"type": "text", "text": system_prompt},
+            ],
+        },
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": f"<task>\n{task}\n</task>\n"},
+                {"type": "text", "text": f"<observation step={step}>\n"},
+                {"type": "text", "text": "<screenshot>\n"},
+                {
+                    "type": "image",
+                    "image": image,
+                },
+                {"type": "text", "text": "\n</screenshot>\n"},
+                {"type": "text", "text": "\n</observation>\n"},
+            ],
+        },
+    ]
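
For reference, a short usage sketch of the building blocks above, assuming pydantic v2 (which `model_json_schema()` implies); the blank stand-in screenshot and the sample action dict are illustrative, not taken from the commit.

```python
# Illustrative usage of navigation.py; the inputs below are placeholders.
from PIL import Image
from pydantic import TypeAdapter

from navigation import ActionSpace, get_navigation_prompt

# Build the chat messages that app.py feeds to run_inference_localization:
screenshot = Image.new("RGB", (1280, 800), color="white")  # stand-in for a real page screenshot
messages = get_navigation_prompt("Book a hotel in Paris on August 3rd for 3 nights", screenshot, step=1)
print([item["type"] for item in messages[1]["content"]])  # ['text', 'text', 'text', 'image', 'text', 'text']

# Validate a raw action dict against the ActionSpace union; pydantic selects the member
# whose `action` literal matches.
adapter = TypeAdapter(ActionSpace)
action = adapter.validate_python({"action": "scroll", "direction": "down"})
print(action.log())  # I have scrolled down
```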
requirements.txt ADDED
@@ -0,0 +1,2 @@
+transformers
+accelerate