SrijitMukherjee committed on
Commit
1d5d586
·
verified ·
1 Parent(s): 9a1205d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +167 -102
app.py CHANGED
@@ -1,127 +1,192 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- import pandas as pd
4
-
5
- """
6
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
- """
8
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- ################################################################
11
 
12
- # Load your CSV file
13
- df = pd.read_csv("your_file.csv")
 
 
 
14
 
15
- # Create dropdowns for exam name, year, and problem number
16
- exam_names = df["exam name"].unique()
17
- year_options = df["year"].unique()
18
- problem_numbers = df["problem number"].unique()
19
 
20
- exam_dropdown = gr.Dropdown(exam_names, label="Exam Name")
21
- year_dropdown = gr.Dropdown(year_options, label="Year")
22
- problem_dropdown = gr.Dropdown(problem_numbers, label="Problem Number")
23
 
24
- # Define the functions for the three buttons
25
- def solve_problem(exam, year, problem):
26
- problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
27
- prompt = f"Solve the following problem: {problem_statement}"
28
- response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
29
- return response.choices[0].text
30
 
31
- def give_hints(exam, year, problem):
32
- problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
33
- prompt = f"Give hints for the following problem: {problem_statement}"
34
- response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
35
- return response.choices[0].text
36
 
37
- def create_similar_problem(exam, year, problem):
38
- problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
39
- prompt = f"Create a similar problem to the following one: {problem_statement}"
40
- response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
41
- return response.choices[0].text
42
 
43
- ################################################################
44
 
45
  def respond(
46
- message,
47
- history: list[tuple[str, str]],
48
- system_message,
49
- max_tokens,
50
- temperature,
51
- top_p,
52
  ):
53
- messages = [{"role": "system", "content": system_message}]
54
 
55
- for val in history:
56
- if val[0]:
57
- messages.append({"role": "user", "content": val[0]})
58
- if val[1]:
59
- messages.append({"role": "assistant", "content": val[1]})
60
 
61
- messages.append({"role": "user", "content": message})
62
 
63
- response = ""
64
 
65
- for message in client.chat_completion(
66
- messages,
67
- max_tokens=max_tokens,
68
- stream=True,
69
- temperature=temperature,
70
- top_p=top_p,
71
- ):
72
- token = message.choices[0].delta.content
73
 
74
- response += token
75
- yield response
76
 
77
  """
78
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
79
  """
80
  demo = gr.ChatInterface(
81
- respond,
82
- additional_inputs=[
83
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
84
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
85
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
86
- gr.Slider(
87
- minimum=0.1,
88
- maximum=1.0,
89
- value=0.95,
90
- step=0.05,
91
- label="Top-p (nucleus sampling)",
92
- ),
93
- ],
94
- )
95
-
96
- ################################################################
97
-
98
- # Create Gradio interface with Blocks context
99
- with gr.Blocks() as dropdown_interface:
100
- with gr.Column():
101
- exam_dropdown.render()
102
- year_dropdown.render()
103
- problem_dropdown.render()
104
-
105
- solve_button = gr.Button("Solve Problem")
106
- hints_button = gr.Button("Give Hints")
107
- similar_problem_button = gr.Button("Create Similar Problem")
108
-
109
- output_text = gr.Textbox(label="Output")
110
-
111
- solve_button.click(solve_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
112
- hints_button.click(give_hints, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
113
- similar_problem_button.click(create_similar_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
114
-
115
- ################################################################
116
-
117
- # Combine both interfaces into a tabbed layout
118
- tabbed_interface = gr.TabbedInterface(
119
- [dropdown_interface, demo],
120
- ["Problem Solver", "Chat Interface"]
121
  )
122
 
123
- ################################################################
124
 
125
- # Launch the app
126
  if __name__ == "__main__":
127
- tabbed_interface.launch()
 
1
+ # import gradio as gr
2
+ # from huggingface_hub import InferenceClient
3
+ # import pandas as pd
4
+
5
+ # """
6
+ # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
+ # """
8
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
+
10
+ # ################################################################
11
+
12
+ # # Load your CSV file
13
+ # df = pd.read_csv("your_file.csv")
14
+
15
+ # # Create dropdowns for exam name, year, and problem number
16
+ # exam_names = df["exam name"].unique()
17
+ # year_options = df["year"].unique()
18
+ # problem_numbers = df["problem number"].unique()
19
+
20
+ # exam_dropdown = gr.Dropdown(exam_names, label="Exam Name")
21
+ # year_dropdown = gr.Dropdown(year_options, label="Year")
22
+ # problem_dropdown = gr.Dropdown(problem_numbers, label="Problem Number")
23
+
24
+ # # Define the functions for the three buttons
25
+ # def solve_problem(exam, year, problem):
26
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
27
+ # prompt = f"Solve the following problem: {problem_statement}"
28
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
29
+ # return response.choices[0].text
30
+
31
+ # def give_hints(exam, year, problem):
32
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
33
+ # prompt = f"Give hints for the following problem: {problem_statement}"
34
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
35
+ # return response.choices[0].text
36
+
37
+ # def create_similar_problem(exam, year, problem):
38
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
39
+ # prompt = f"Create a similar problem to the following one: {problem_statement}"
40
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
41
+ # return response.choices[0].text
42
+
43
+ # ################################################################
44
+
45
+ # def respond(
46
+ # message,
47
+ # history: list[tuple[str, str]],
48
+ # system_message,
49
+ # max_tokens,
50
+ # temperature,
51
+ # top_p,
52
+ # ):
53
+ # messages = [{"role": "system", "content": system_message}]
54
+
55
+ # for val in history:
56
+ # if val[0]:
57
+ # messages.append({"role": "user", "content": val[0]})
58
+ # if val[1]:
59
+ # messages.append({"role": "assistant", "content": val[1]})
60
+
61
+ # messages.append({"role": "user", "content": message})
62
+
63
+ # response = ""
64
+
65
+ # for message in client.chat_completion(
66
+ # messages,
67
+ # max_tokens=max_tokens,
68
+ # stream=True,
69
+ # temperature=temperature,
70
+ # top_p=top_p,
71
+ # ):
72
+ # token = message.choices[0].delta.content
73
+
74
+ # response += token
75
+ # yield response
76
+
77
+ # """
78
+ # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
79
+ # """
80
+ # demo = gr.ChatInterface(
81
+ # respond,
82
+ # additional_inputs=[
83
+ # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
84
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
85
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
86
+ # gr.Slider(
87
+ # minimum=0.1,
88
+ # maximum=1.0,
89
+ # value=0.95,
90
+ # step=0.05,
91
+ # label="Top-p (nucleus sampling)",
92
+ # ),
93
+ # ],
94
+ # )
95
+
96
+ # ################################################################
97
+
98
+ # # Create Gradio interface with Blocks context
99
+ # with gr.Blocks() as dropdown_interface:
100
+ # with gr.Column():
101
+ # exam_dropdown.render()
102
+ # year_dropdown.render()
103
+ # problem_dropdown.render()
104
+
105
+ # solve_button = gr.Button("Solve Problem")
106
+ # hints_button = gr.Button("Give Hints")
107
+ # similar_problem_button = gr.Button("Create Similar Problem")
108
+
109
+ # output_text = gr.Textbox(label="Output")
110
+
111
+ # solve_button.click(solve_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
112
+ # hints_button.click(give_hints, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
113
+ # similar_problem_button.click(create_similar_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
114
 
115
+ # ################################################################
116
 
117
+ # # Combine both interfaces into a tabbed layout
118
+ # tabbed_interface = gr.TabbedInterface(
119
+ # [dropdown_interface, demo],
120
+ # ["Problem Solver", "Chat Interface"]
121
+ # )
122
 
123
+ # ################################################################
 
 
 
124
 
125
+ # # Launch the app
126
+ # if __name__ == "__main__":
127
+ # tabbed_interface.launch()
128
 
 
 
 
 
 
 
129
 
130
import gradio as gr
from huggingface_hub import InferenceClient

# For more information on `huggingface_hub` Inference API support, check the docs:
# https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
#
# Shared client used by `respond` below; the model is a hosted Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
137
 
 
138
 
139
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for `message`, given the conversation `history`.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns from Gradio; falsy entries are skipped.
    system_message : str
        System prompt prepended to the conversation.
    max_tokens : int
        Maximum number of new tokens to generate.
    temperature : float
        Sampling temperature forwarded to the Inference API.
    top_p : float
        Nucleus-sampling cutoff forwarded to the Inference API.

    Yields
    ------
    str
        The accumulated response so far, re-emitted as each token arrives
        (Gradio streams these partial strings into the chat window).
    """
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the OpenAI-style message list from the Gradio tuple history.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # FIX: the original loop reused the name `message` for each stream chunk,
    # shadowing the function parameter — use a distinct name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # FIX: streaming deltas can carry `content=None` (e.g. role-only or
        # final chunks); skip those instead of raising TypeError on `+=`.
        if token:
            response += token
            yield response
170
 
171
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Extra controls rendered under the chat box. Their order must match the
# system_message / max_tokens / temperature / top_p parameters of `respond`.
_system_prompt = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
_top_p = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[_system_prompt, _max_tokens, _temperature, _top_p],
)
189
 
 
190
 
 
191
# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()