starnernj committed on
Commit
4c9f7f3
·
verified ·
1 Parent(s): 9b73d65

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -0
app.py CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  from peft import PeftModel
 
 
 
5
 
6
  # Base model (LLaMA 3.1 8B) from Meta
7
  base_model_name = "meta-llama/Llama-3.1-8B"
@@ -9,6 +12,9 @@ base_model_name = "meta-llama/Llama-3.1-8B"
9
  # Your fine-tuned LoRA adapter (uploaded to Hugging Face)
10
  lora_model_name = "starnernj/Early-Christian-Church-Fathers-LLaMA-3.1-Fine-Tuned"
11
 
 
 
 
12
  # Load base model
13
  model = AutoModelForCausalLM.from_pretrained(base_model_name)
14
 
 
2
  from huggingface_hub import InferenceClient
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  from peft import PeftModel
5
+ import os
6
+
7
+ HuggingFaceFineGrainedReadToken
8
 
9
  # Base model (LLaMA 3.1 8B) from Meta
10
  base_model_name = "meta-llama/Llama-3.1-8B"
 
12
  # Your fine-tuned LoRA adapter (uploaded to Hugging Face)
13
  lora_model_name = "starnernj/Early-Christian-Church-Fathers-LLaMA-3.1-Fine-Tuned"
14
 
15
+ # Login because LLaMA 3.1 8B is a gated model
16
+ login(token=os.getenv("HuggingFaceFineGrainedReadToken"))
17
+
18
  # Load base model
19
  model = AutoModelForCausalLM.from_pretrained(base_model_name)
20