Update app.py
app.py
CHANGED
@@ -2,35 +2,22 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-
-
-from huggingface_hub import login
-import os
-
-hf_token = os.getenv("HF_TOKEN")  # Fetch token securely
-login(hf_token)
-# Load Mixtral model
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-
-# Define the model name
-model_name = "microsoft/phi-2"  # Ensure this is correct
-
-# Load the tokenizer and model
+# Load the model and tokenizer
+model_name = "microsoft/phi-2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Set
+# Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
-
-print("Model loaded successfully!")
+model.to(device)
 
-#
-
-
-
-
-
+# Define the function to process user input
+def solve_problem(user_input):
+    inputs = tokenizer(user_input, return_tensors="pt").to(device)
+    outputs = model.generate(**inputs, max_length=200)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
 
+# Create Gradio interface
+iface = gr.Interface(fn=solve_problem, inputs="text", outputs="text")
 iface.launch()
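For reference, the complete app.py after this commit, assembled from the context and + lines above (including the `import gradio as gr` carried in the hunk header), should run as-is on a Space with gradio, transformers, and torch installed:

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the function to process user input
def solve_problem(user_input):
    inputs = tokenizer(user_input, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_length=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Create Gradio interface
iface = gr.Interface(fn=solve_problem, inputs="text", outputs="text")
iface.launch()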
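One caveat, not part of the commit itself: max_length=200 counts the prompt tokens as well as the generated ones, so a long input leaves little or no room for the reply. A hedged alternative sketch, using transformers' max_new_tokens to bound only the continuation:

# Sketch (assumption, not in the commit): cap generated tokens only;
# pad_token_id silences the warning phi-2 emits because its tokenizer
# defines no pad token.
outputs = model.generate(
    **inputs,
    max_new_tokens=200,
    pad_token_id=tokenizer.eos_token_id,
)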