# cheentastat / app34.py
import pandas as pd
import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the InferenceClient
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
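# Note (assumption, not in the original): anonymous Inference API calls are
# rate-limited; InferenceClient accepts an optional access token if needed, e.g.
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token="hf_...")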
# Load the problem bank. Expected columns: "exam name", "year",
# "problem number", and "problem".
df = pd.read_csv("your_file.csv")
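# A minimal sketch of the expected shape, with hypothetical values, useful for
# local testing when the real CSV is unavailable ("your_file.csv" is kept as
# the placeholder from the original):
# df = pd.DataFrame({
#     "exam name": ["Sample Exam", "Sample Exam"],
#     "year": [2023, 2024],
#     "problem number": [1, 1],
#     "problem": ["Prove that the square root of 2 is irrational.",
#                 "Show that the sum of two odd integers is even."],
# })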
# Create dropdowns for exam name, year, and problem number. The components are
# instantiated here and rendered inside the Blocks layout below.
exam_names = sorted(df["exam name"].unique())
year_options = sorted(df["year"].unique())
problem_numbers = sorted(df["problem number"].unique())
exam_dropdown = gr.Dropdown(exam_names, label="Exam Name")
year_dropdown = gr.Dropdown(year_options, label="Year")
problem_dropdown = gr.Dropdown(problem_numbers, label="Problem Number")
# Define the functions for the three buttons. Each looks up the selected
# problem statement and sends a single prompt to the model.
def get_problem_statement(exam, year, problem):
    row = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]
    return row["problem"].values[0]

def generate(prompt):
    # With streaming and details disabled, text_generation returns the
    # generated text as a plain string.
    return client.text_generation(prompt, max_new_tokens=512, temperature=0.7, top_p=0.95)

def solve_problem(exam, year, problem):
    return generate(f"Solve the following problem: {get_problem_statement(exam, year, problem)}")

def give_hints(exam, year, problem):
    return generate(f"Give hints for the following problem: {get_problem_statement(exam, year, problem)}")

def create_similar_problem(exam, year, problem):
    return generate(f"Create a similar problem to the following one: {get_problem_statement(exam, year, problem)}")
# Define the chat response function. History arrives as (user, assistant)
# pairs; the reply is streamed back chunk by chunk.
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            # Yield the accumulated text so Gradio updates the chat in place.
            yield response
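# Example of driving respond() directly, outside Gradio (a sketch; in the real
# app, gr.ChatInterface consumes the generator):
# for partial in respond("What is 2 + 2?", [], "You are a friendly Chatbot.", 64, 0.7, 0.95):
#     print(partial)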
# Create the dropdown-driven interface within a Blocks context
with gr.Blocks() as dropdown_interface:
    with gr.Column():
        exam_dropdown.render()
        year_dropdown.render()
        problem_dropdown.render()
        solve_button = gr.Button("Solve Problem")
        hints_button = gr.Button("Give Hints")
        similar_problem_button = gr.Button("Create Similar Problem")
        output_text = gr.Textbox(label="Output")
    solve_button.click(solve_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
    hints_button.click(give_hints, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
    similar_problem_button.click(create_similar_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
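# Optional refinement (a sketch, not part of the original behavior): narrow the
# problem-number choices whenever the exam or year selection changes.
# def update_problems(exam, year):
#     subset = df[(df["exam name"] == exam) & (df["year"] == year)]
#     return gr.Dropdown(choices=sorted(subset["problem number"].unique()))
# exam_dropdown.change(update_problems, [exam_dropdown, year_dropdown], problem_dropdown)
# year_dropdown.change(update_problems, [exam_dropdown, year_dropdown], problem_dropdown)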
chat_interface = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
# Combine both interfaces into a tabbed layout
tabbed_interface = gr.TabbedInterface(
    [dropdown_interface, chat_interface],
    ["Problem Solver", "Chat Interface"],
)
# Launch the app
if __name__ == "__main__":
    tabbed_interface.launch()