Sh3rlockhomes committed on
Commit
b60996a
1 Parent(s): 50b875f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -0
app.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer once at import time so every request reuses them.
# NOTE(review): trust_remote_code=True executes code shipped with the model repo;
# only keep it if the "Dumele/autotrain-shhsb-57a2l" repository is trusted.
model_name = "Dumele/autotrain-shhsb-57a2l"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
9
# Pipeline is created lazily on the first request and then cached, instead of
# being rebuilt on every call (the original reconstructed it per request).
_pipe = None


def generate_text(prompt):
    """Generate a text continuation of *prompt*.

    Args:
        prompt: The input text to continue.

    Returns:
        The generated text (which includes the prompt), truncated to
        max_length=200 tokens.
    """
    global _pipe
    if _pipe is None:
        _pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=200,
        )
    result = _pipe(prompt)
    return result[0]['generated_text']
15
# Create the Gradio interface.
# Fix: `gr.inputs.Textbox` is the deprecated pre-3.0 namespace and was removed
# in Gradio 3+; components now live directly on the `gr` module.
# NOTE(review): the title/description claim Mistral-7B but model_name points at
# "Dumele/autotrain-shhsb-57a2l" -- confirm the two actually match.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with Mistral-7B",
    description="Generate text using the fine-tuned Mistral-7B model from the Dumele repository."
)
24
# Launch the Gradio interface. Guarded so importing this module (e.g. for
# testing) does not immediately start a web server; `python app.py` still
# launches as before.
if __name__ == "__main__":
    iface.launch()