archit11 committed
Commit fab8cb4 · verified · 1 Parent(s): 0349e92

Update app.py

Files changed (1)
  1. app.py +21 -50
app.py CHANGED
@@ -2,13 +2,13 @@ import os
 from threading import Thread
 from typing import Iterator, List, Tuple
 import json
+import requests

 import gradio as gr
 import spaces
 import torch
 import transformers
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-from flask import Flask, request, jsonify

 DESCRIPTION = """\
 # Zero GPU Model Comparison Arena
@@ -43,34 +43,23 @@ for model_id in MODEL_OPTIONS:
     if tokenizers[model_id].pad_token_id is None:
         tokenizers[model_id].pad_token_id = tokenizers[model_id].eos_token_id

-# Initialize Flask app
-app = Flask(__name__)
-
-@app.route('/log', methods=['POST'])
-def log_results():
-    data = request.json
-    # Here you can implement any additional processing or storage logic
-    print("Logged:", json.dumps(data, indent=2))
-    return jsonify({"status": "success"}), 200
-
-def prepare_input(model_id: str, message: str, chat_history: List[Tuple[str, str]]):
-    if "OpenHathi" in model_id:
-        # OpenHathi model doesn't use a specific chat template
-        full_prompt = message
-        for history_message in chat_history:
-            full_prompt = f"{history_message[0]}\n{history_message[1]}\n{full_prompt}"
-        return tokenizers[model_id](full_prompt, return_tensors="pt")
-    elif "Navarna" in model_id:
-        # Navarna model uses a chat template
-        conversation = []
-        for user, assistant in chat_history:
-            conversation.extend([
-                {"role": "user", "content": user},
-                {"role": "assistant", "content": assistant},
-            ])
-        conversation.append({"role": "user", "content": message})
-        prompt = tokenizers[model_id].apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
-        return tokenizers[model_id](prompt, return_tensors="pt")
+def log_comparison(model1_name: str, model2_name: str, question: str, answer1: str, answer2: str, winner: str = None):
+    log_data = {
+        "question": question,
+        "model1": {"name": model1_name, "answer": answer1},
+        "model2": {"name": model2_name, "answer": answer2},
+        "winner": winner
+    }
+
+    # Send log data to remote server
+    try:
+        response = requests.post('http://144.24.151.32:5000/log', json=log_data, timeout=5)
+        if response.status_code == 200:
+            print("Successfully logged to server")
+        else:
+            print(f"Failed to log to server. Status code: {response.status_code}")
+    except requests.RequestException as e:
+        print(f"Error sending log to server: {e}")

 @spaces.GPU(duration=90)
 def generate(
@@ -135,24 +124,7 @@ def compare_models(

     return chat_history1, chat_history2, chat_history1, chat_history2

-def log_comparison(model1_name: str, model2_name: str, question: str, answer1: str, answer2: str, winner: str = None):
-    log_data = {
-        "question": question,
-        "model1": {"name": model1_name, "answer": answer1},
-        "model2": {"name": model2_name, "answer": answer2},
-        "winner": winner
-    }
-
-    # Send log data to Flask server
-    import requests
-    try:
-        response = requests.post('http://144.24.151.32:5000/log', json=log_data)
-        if response.status_code == 200:
-            print("Successfully logged to server")
-        else:
-            print(f"Failed to log to server. Status code: {response.status_code}")
-    except requests.RequestException as e:
-        print(f"Error sending log to server: {e}")
+

 def vote_better(model1_name, model2_name, question, answer1, answer2, choice):
     winner = model1_name if choice == "Model 1" else model2_name
@@ -205,8 +177,7 @@ with gr.Blocks(css="style.css") as demo:

 if __name__ == "__main__":
     # Start Flask server in a separate thread
-    flask_thread = Thread(target=app.run, kwargs={"host": "0.0.0.0", "port": 5000})
-    flask_thread.start()
+

     # Start Gradio app with public link
-    demo.queue(max_size=10).launch(share=True)
+    demo.queue(max_size=3).launch(share=True)
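Note on the logging change: this commit removes the in-process Flask app (and its /log route) from the Space and instead has log_comparison POST each vote record to a collector at http://144.24.151.32:5000/log. That collector is not part of this repo. Below is a minimal sketch of what it would need to expose, assuming it simply mirrors the route deleted from app.py; the filename collector.py and the JSONL output path are illustrative, not taken from the commit.

# collector.py: hypothetical standalone receiver for the Space's vote logs.
# Assumes the same contract as the route removed from app.py:
# POST /log with a JSON body, answered with {"status": "success"} and HTTP 200.
import json

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/log', methods=['POST'])
def log_results():
    data = request.json
    print("Logged:", json.dumps(data, indent=2))
    # Persist each comparison as one JSON line (illustrative path).
    with open("comparison_logs.jsonl", "a") as f:
        f.write(json.dumps(data) + "\n")
    return jsonify({"status": "success"}), 200

if __name__ == "__main__":
    # Bind on all interfaces, port 5000, matching the URL app.py posts to.
    app.run(host="0.0.0.0", port=5000)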
 
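One side effect worth noting: with the background Flask thread gone, the requests.post call in log_comparison now runs synchronously inside the Gradio event handler, so an unreachable collector can block the voting UI for up to the 5-second timeout. A possible refinement, not part of this commit, is to reuse the already-imported Thread and fire the log call in the background; log_comparison_async is a hypothetical helper name.

from threading import Thread

def log_comparison_async(*args, **kwargs):
    # Hypothetical fire-and-forget wrapper around log_comparison.
    # log_comparison already catches requests.RequestException, so the
    # worker thread exits cleanly even when the collector is down.
    Thread(target=log_comparison, args=args, kwargs=kwargs, daemon=True).start()

# Example call site, e.g. inside vote_better:
# log_comparison_async(model1_name, model2_name, question, answer1, answer2, winner)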