starnernj committed
Commit 9702672 · verified · 1 Parent(s): 507ce86

Update app.py

Files changed (1)
  1. app.py +37 -0
app.py CHANGED
@@ -1,5 +1,41 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+# Base model (LLaMA 3.1 8B) from Meta
+base_model_name = "meta-llama/Llama-3-8B"
+
+# Your fine-tuned LoRA adapter (uploaded to Hugging Face)
+lora_model_name = "starnernj/Early-Christian-Church-Fathers-LLaMA-3.1-Fine-Tuned"
+
+# Load base model
+model = AutoModelForCausalLM.from_pretrained(base_model_name)
+
+# Load LoRA adapter
+model = PeftModel.from_pretrained(model, lora_model_name)
+
+# Load tokenizer
+tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+
+# Function to generate responses
+def chatbot_response(user_input):
+    inputs = tokenizer(user_input, return_tensors="pt")
+    outputs = model.generate(**inputs, max_length=400)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+# Launch the Gradio chatbot
+interface = gr.Interface(
+    fn=chatbot_response,
+    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
+    outputs="text",
+    title="Early Christian Church Fathers Fine-Tuned LLaMA 3.1 8B with LoRA",
+    description="A chatbot using my fine-tuned LoRA adapter on LLaMA 3.1 8B, tuned on thousands of writings of the early Christian Church Fathers.",
+)
+
+interface.launch()
+
+"""
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -62,3 +98,4 @@ demo = gr.ChatInterface(
 
 if __name__ == "__main__":
     demo.launch()
+"""
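
For context on what the new app.py does at load time: calling `AutoModelForCausalLM.from_pretrained(base_model_name)` with no extra arguments pulls the 8B base weights in full fp32 onto the default device, which is unlikely to fit on a CPU-only Space. Below is a minimal sketch of a lighter-weight load, assuming a GPU runtime with `torch` and `accelerate` installed; the dtype, the `device_map` setting, and the optional merge step are illustrative choices, not part of this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_name = "meta-llama/Llama-3-8B"  # repo id as committed; the code comments refer to LLaMA 3.1
lora_model_name = "starnernj/Early-Christian-Church-Fathers-LLaMA-3.1-Fine-Tuned"

# Load the base weights in bfloat16 and let accelerate place them (assumes a GPU runtime)
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Attach the LoRA adapter on top of the base weights, then switch to inference mode
model = PeftModel.from_pretrained(model, lora_model_name)
model.eval()

# Optionally fold the adapter into the base weights so generation runs without the PEFT wrapper
# model = model.merge_and_unload()

# Tokenizer comes from the base checkpoint, as in the commit
tokenizer = AutoTokenizer.from_pretrained(base_model_name)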
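
On the generation side, `max_length=400` in the committed `chatbot_response` caps the prompt and the completion together, so a long question leaves little room for an answer. The sketch below, reusing the `model` and `tokenizer` defined above, budgets new tokens only, keeps tensors on the model's device, and sets a pad token id; the sampling values are illustrative and do not come from the commit.

def chatbot_response(user_input):
    # Tokenize and move the tensors to wherever the model weights live
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=400,                    # budget for generated tokens only
        do_sample=True,                        # the committed call relies on greedy defaults
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,   # LLaMA tokenizers ship without a pad token
    )
    # Drop the echoed prompt tokens before decoding
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)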