lyimo committed
Commit 4f36f3a · 1 Parent(s): 326d94d

Update app.py

Files changed (1)
  1. app.py +51 -25
app.py CHANGED
@@ -1,29 +1,33 @@
-import os
 import gradio as gr
+from fastai.vision.all import *
 import copy
+import os
 import time
-import llama_cpp
 from llama_cpp import Llama
-from huggingface_hub import hf_hub_download
-
+from huggingface_hub import hf_hub_download
 
+# Load the LLM model
 llm = Llama(
     model_path=hf_hub_download(
         repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7B-Chat-GGML"),
         filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.ggmlv3.q5_0.bin"),
     ),
     n_ctx=2048,
-    n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
-)
+    n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
+)
 
 history = []
 
 system_message = """
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
-"""
+You are a helpful BIRD ASSISTANT, AND YOU KNOW A LOT ABOUT BIRDS"""
+# The rest of the system message
 
+# Load the Vision Model
+learn = load_learner('export.pkl')
 
+labels = learn.dls.vocab
+
+# Function to generate responses using LLM
 def generate_text(message, history):
     temp = ""
     input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
@@ -36,13 +40,13 @@ def generate_text(message, history):
         input_prompt,
         temperature=0.15,
         top_p=0.1,
-        top_k=40,
+        top_k=40,
         repeat_penalty=1.1,
         max_tokens=1024,
         stop=[
-            "<|prompter|>",
-            "<|endoftext|>",
-            "<|endoftext|> \n",
+            "<|prompter|>",
+            "<|endoftext|>",
+            "<|endoftext|> \n",
             "ASSISTANT:",
             "USER:",
             "SYSTEM:",
@@ -56,16 +60,38 @@
 
     history = ["init", input_prompt]
 
+# Function to predict using the Vision Model and interact with LLM
+def predict(img):
+    img = PILImage.create(img)
+    pred, pred_idx, probs = learn.predict(img)
+    bird_predictions = {labels[i]: float(probs[i]) for i in range(len(labels))}
+
+    # Construct a message for LLM using bird predictions
+    message = "I have detected:\n"
+    for bird, prob in bird_predictions.items():
+        message += f"- {bird}: {prob:.2%}\n"
+
+    # Generate responses using LLM
+    responses = generate_text(message, history)
+
+    return {"bird_predictions": bird_predictions, "llm_responses": responses}
 
-demo = gr.ChatInterface(
-    generate_text,
-    title="llama-cpp-python on GPU",
-    description="Running LLM with https://github.com/abetlen/llama-cpp-python",
-    examples=["tell me everything about llamas"],
-    cache_examples=True,
-    retry_btn=None,
-    undo_btn="Delete Previous",
-    clear_btn="Clear",
-)
-demo.queue(concurrency_count=1, max_size=5)
-demo.launch()
+title = "Bird Detector with LLM"
+description = "Detect birds and interact with LLM."
+examples = ['BIRD.jpg']
+interpretation = 'default'
+enable_queue = True
+
+gr.Interface(
+    fn=predict,
+    inputs=gr.inputs.Image(shape=(512, 512)),
+    outputs={
+        "bird_predictions": gr.outputs.Data(type="dictionary"),
+        "llm_responses": gr.outputs.Textbox(),
+    },
+    title=title,
+    description=description,
+    examples=examples,
+    interpretation=interpretation,
+    enable_queue=enable_queue,
+).launch()
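
Note: the sampling settings above can be exercised against the loaded model directly, outside Gradio. A minimal smoke test, assuming the llm and system_message objects from this app.py (the prompt text itself is illustrative):

# Quick smoke test of the model with the app's sampling settings.
out = llm(
    f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\nName three common garden birds. [/INST]",
    temperature=0.15,
    top_p=0.1,
    top_k=40,
    repeat_penalty=1.1,
    max_tokens=256,
)
print(out["choices"][0]["text"])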
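
In the previous revision, generate_text fed gr.ChatInterface, which usually means it yields partial strings rather than returning one; if that is still the case, the responses = generate_text(message, history) call inside predict receives a generator object, not text. A sketch of collecting the final response under that assumption (collect_response is a hypothetical helper, not part of the commit):

def collect_response(message, history):
    # Hypothetical helper: drain the stream, keeping the last (most complete)
    # partial string. Assumes generate_text yields progressively longer chunks.
    final = ""
    for partial in generate_text(message, history):
        final = partial
    return final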
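
The gr.inputs / gr.outputs modules, interpretation=, and enable_queue= belong to the legacy Gradio 2.x-style API and were removed as of Gradio 4, and gr.Interface expects outputs as a component or list of components rather than a dict. A rough sketch of the same interface against current Gradio, reusing learn, labels, history, and generate_text from this app.py (component labels are illustrative, not part of the commit):

import gradio as gr
from fastai.vision.all import PILImage

def predict(img):
    # Classify the image and map class probabilities onto the learner's vocab.
    pred, pred_idx, probs = learn.predict(PILImage.create(img))
    bird_predictions = {labels[i]: float(probs[i]) for i in range(len(labels))}
    message = "I have detected:\n" + "".join(
        f"- {bird}: {prob:.2%}\n" for bird, prob in bird_predictions.items()
    )
    # Return one value per output component, in declaration order
    # (wrap generate_text with collect_response above if it streams).
    return bird_predictions, generate_text(message, history)

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[gr.JSON(label="bird_predictions"), gr.Textbox(label="llm_responses")],
    title="Bird Detector with LLM",
    description="Detect birds and interact with LLM.",
    examples=["BIRD.jpg"],
)
demo.queue(max_size=5).launch()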