ntaexams committed
Commit 41fa2bd (verified) · Parent: c5ed1e6

Update app.py

Files changed (1): app.py (+12 −25)
app.py CHANGED

@@ -2,35 +2,22 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-from huggingface_hub import login
-
-from huggingface_hub import login
-import os
-
-hf_token = os.getenv("HF_TOKEN") # Fetch token securely
-login(hf_token)
-# Load Mixtral model
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-
-# Define the model name
-model_name = "microsoft/phi-2" # Ensure this is correct
-
-# Load the tokenizer and model
+# Load the model and tokenizer
+model_name = "microsoft/phi-2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Set the device (use GPU if available, otherwise fallback to CPU)
+# Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device) # Move model to the appropriate device
-
-print("Model loaded successfully!")
+model.to(device)
 
-# Create Gradio UI
-iface = gr.Interface(fn=solve_problem,
-                     inputs=gr.Textbox(lines=2, placeholder="Enter a JEE/NEET question..."),
-                     outputs="text",
-                     title="Prof. Cool - JEE/NEET Solver",
-                     description="Ask a complex JEE Advanced or NEET question and get a detailed solution.")
+# Define the function to process user input
+def solve_problem(user_input):
+    inputs = tokenizer(user_input, return_tensors="pt").to(device)
+    outputs = model.generate(**inputs, max_length=200)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
 
+# Create Gradio interface
+iface = gr.Interface(fn=solve_problem, inputs="text", outputs="text")
 iface.launch()
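
For reference, here is app.py as it stands after this commit, assembled from the context and added lines above. No Hugging Face login is needed, since microsoft/phi-2 is a public checkpoint:

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the function to process user input
def solve_problem(user_input):
    inputs = tokenizer(user_input, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_length=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Create Gradio interface
iface = gr.Interface(fn=solve_problem, inputs="text", outputs="text")
iface.launch()

One caveat: max_length=200 counts the prompt tokens as well as the generated ones, so a long question leaves little room for the answer; passing max_new_tokens=200 to model.generate would bound only the generated portion.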
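The deleted login block only matters if the app is later pointed at a gated or private checkpoint. In that case, a minimal sketch of the pattern this commit dropped, reading the token from the HF_TOKEN environment variable as the old code did, with a guard added so a missing token does not raise:

import os
from huggingface_hub import login

# Read the access token from the environment (e.g. a Space secret named HF_TOKEN)
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token)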