import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model (LLaMA 3.1 8B) from Meta
base_model_name = "meta-llama/Llama-3.1-8B"

# Your fine-tuned LoRA adapter (uploaded to Hugging Face)
lora_model_name = "starnernj/Early-Christian-Church-Fathers-LLaMA-3.1-Fine-Tuned"

# Load base model (bfloat16 plus device_map="auto" keeps an 8B model's memory
# footprint manageable; device_map requires the `accelerate` package)
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Load LoRA adapter
model = PeftModel.from_pretrained(model, lora_model_name)
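
# Optional: merging the LoRA weights into the base model gives slightly faster
# inference once no further adapter switching is needed (standard PEFT API):
# model = model.merge_and_unload()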

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
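
# LLaMA tokenizers ship without a pad token; reusing EOS silences the
# pad_token_id warning from generate() (a common workaround, not specific
# to this adapter)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token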

# Function to generate responses
def chatbot_response(user_input):
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=400)
    # Slice off the prompt tokens so the reply is not prefixed with the question
    reply_ids = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)

# Launch the Gradio chatbot
interface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
    outputs="text",
    title="Early Christian Church Fathers Fine-Tuned LLaMA 3.1 8B with LoRA",
    description="A chatbot using my fine-tuned LoRA adapter on LLaMA 3.1 8B, tuned on thousands of writings of the early Christian Church Fathers.",
)

interface.launch()
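
# Tip: when running outside Hugging Face Spaces, interface.launch(share=True)
# also creates a temporary public URL (a standard Gradio option)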