lyimo committed
Commit cc431b5 · 1 Parent(s): 8c07d46

Update app.py

Files changed (1):
  1. app.py +30 -54
app.py CHANGED
@@ -1,57 +1,47 @@
  import gradio as gr
- from fastai.vision.all import *
  import copy
- import os
  import time
+ import llama_cpp
  from llama_cpp import Llama
  from huggingface_hub import hf_hub_download

- # Load the LLM model
+
  llm = Llama(
      model_path=hf_hub_download(
          repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7B-Chat-GGML"),
          filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.ggmlv3.q5_0.bin"),
      ),
      n_ctx=2048,
-     n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
+     n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
  )

  history = []

  system_message = """
- You are a helpful bird researcher
+ You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
  """
- # The rest of the system message
-
- # Load the Vision Model
- learn = load_learner('export.pkl')

- labels = learn.dls.vocab

- # Function to generate responses using LLM
  def generate_text(message, history):
      temp = ""
      input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
      for interaction in history:
          input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s> [INST] "

-     # Truncate or limit the length of the input message
-     max_input_length = 500  # You can adjust this value as needed
-     truncated_message = message[:max_input_length]
-
-     input_prompt = input_prompt + str(truncated_message) + " [/INST] "
+     input_prompt = input_prompt + str(message) + " [/INST] "

      output = llm(
          input_prompt,
          temperature=0.15,
          top_p=0.1,
-         top_k=40,
+         top_k=40,
          repeat_penalty=1.1,
-         max_tokens=4096,
+         max_tokens=1024,
          stop=[
-             "<|prompter|>",
-             "<|endoftext|>",
-             "<|endoftext|> \n",
+             "<|prompter|>",
+             "<|endoftext|>",
+             "<|endoftext|> \n",
              "ASSISTANT:",
              "USER:",
              "SYSTEM:",
@@ -66,38 +56,24 @@ def generate_text(message, history):
      history = ["init", input_prompt]


- # Function to predict using the Vision Model and interact with LLM
  def predict(img):
      img = PILImage.create(img)
-     pred, pred_idx, probs = learn.predict(img)
-     bird_predictions = {labels[i]: float(probs[i]) for i in range(len(labels))}
-
-     # Construct a message for LLM using bird predictions
-     message = "I have detected:\n"
-     for bird, prob in bird_predictions.items():
-         message += f"- {bird}: {prob:.2%}\n"
-
-     # Generate responses using LLM
-     responses = list(generate_text(message, history))
-
-     return {"bird_predictions": bird_predictions, "llm_responses": responses}
-
- title = "Bird Detector with LLM"
- description = "Detect birds and interact with LLM."
- examples = [['BIRD.png']]
- interpretation = 'default'
- enable_queue = True
-
- gr.Interface(
-     fn=predict,
-     inputs=gr.inputs.Image(),
-     outputs=[
-         gr.outputs.Label(label="Bird Predictions"),
-         gr.outputs.Textbox(label="LLM Responses")
-     ],
-     title=title,
-     description=description,
-     examples=examples,
-     interpretation=interpretation,
-     enable_queue=enable_queue,
- ).launch()
+     pred, pred_idx, probs = learn.predict(img)
+     return {labels[i]: float(probs[i]) for i in range(len(labels))}
+
+ title = "Bird Detector"
+ description = "Bird Detector."
+ examples = ['BIRD.png']
+ interpretation = 'default'
+ enable_queue = True
+
+ def combined(img, message):
+     prediction = predict(img)
+     response = generate_text(message, history)
+     if "I have detected" in response:
+         response = response.replace("I have detected", f"I have detected {prediction['bird']} in the image.")
+
+     return response
+
+
+ gr.Interface(fn=combined, inputs=gr.inputs.Image(shape=(512, 512)), outputs=gr.outputs.Text(), title=title, description=description, examples=examples, interpretation=interpretation, enable_queue=enable_queue).launch()
 
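Note that the new revision still calls os.environ.get(...), PILImage.create(...), and learn.predict(...), and still indexes labels, yet the commit removes the lines that defined them. A minimal sketch of the setup app.py continues to depend on, restored from the previous revision of the file:

# Setup the new app.py references but no longer defines; these lines are
# taken from the previous revision.
import os                           # used by os.environ.get(...) above
from fastai.vision.all import *     # provides PILImage and load_learner

learn = load_learner('export.pkl')  # exported fastai vision model
labels = learn.dls.vocab            # class names returned by predict()
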
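For reference, generate_text() assembles the standard Llama-2-chat prompt template. A small sketch of the string it builds for a fresh conversation (empty history); the user message here is illustrative:

# With no history, the system prompt sits in a <<SYS>> block inside the
# first [INST] turn, followed by the user message.
system_message = "You are a helpful, respectful and honest assistant."
message = "What bird has a red crest?"  # illustrative user message

input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
input_prompt = input_prompt + str(message) + " [/INST] "
print(input_prompt)
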
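Both hunks elide the lines between the llm(...) call and the final history update, so the commit does not show how the completion is consumed. The temp = "" accumulator suggests streaming; the following is a hedged sketch of the usual llama-cpp-python patterns, an assumption rather than code taken from this commit:

# Non-streaming: llm(...) returns a completion dict.
output = llm(input_prompt, max_tokens=1024, stop=["USER:"])
text = output["choices"][0]["text"]

# Streaming: stream=True yields chunks, which would explain temp = "".
temp = ""
for chunk in llm(input_prompt, max_tokens=1024, stop=["USER:"], stream=True):
    temp += chunk["choices"][0]["text"]
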
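One caveat in the new combined(): predict() returns a dict keyed by the model's class names, so prediction['bird'] only resolves if 'bird' is literally one of the vocab labels. A sketch that reports the most probable class instead (top_label is a hypothetical helper, not part of the commit):

# prediction maps class names to probabilities, e.g.
# {"kingfisher": 0.91, "sparrow": 0.06, ...}; take the argmax.
def top_label(prediction):
    return max(prediction, key=prediction.get)

# Inside combined(), the replacement would then read:
# response = response.replace(
#     "I have detected",
#     f"I have detected {top_label(prediction)} in the image.")
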
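Finally, gr.inputs.Image, gr.outputs.Text, interpretation, and enable_queue belong to the legacy pre-3.0 Gradio API, and combined(img, message) takes two arguments while the interface wires up only an image input. A sketch of the same interface against a current Gradio release (4.x assumed), passing both inputs:

# Components now live at the top level and queuing is enabled on the app;
# the example message is illustrative.
demo = gr.Interface(
    fn=combined,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Message")],
    outputs=gr.Textbox(label="Response"),
    title=title,
    description=description,
    examples=[["BIRD.png", "What bird is this?"]],
)
demo.queue().launch()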