lyimo committed on
Commit
8c07d46
·
1 Parent(s): 7ada8fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -35,7 +35,11 @@ def generate_text(message, history):
35
  for interaction in history:
36
  input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s> [INST] "
37
 
38
- input_prompt = input_prompt + str(message) + " [/INST] "
 
 
 
 
39
 
40
  output = llm(
41
  input_prompt,
@@ -43,7 +47,7 @@ def generate_text(message, history):
43
  top_p=0.1,
44
  top_k=40,
45
  repeat_penalty=1.1,
46
- max_tokens=2000, # Increase this value
47
  stop=[
48
  "",
49
  "",
@@ -61,6 +65,7 @@ def generate_text(message, history):
61
 
62
  history = ["init", input_prompt]
63
 
 
64
  # Function to predict using the Vision Model and interact with LLM
65
  def predict(img):
66
  img = PILImage.create(img)
 
35
  for interaction in history:
36
  input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s> [INST] "
37
 
38
+ # Truncate or limit the length of the input message
39
+ max_input_length = 500 # You can adjust this value as needed
40
+ truncated_message = message[:max_input_length]
41
+
42
+ input_prompt = input_prompt + str(truncated_message) + " [/INST] "
43
 
44
  output = llm(
45
  input_prompt,
 
47
  top_p=0.1,
48
  top_k=40,
49
  repeat_penalty=1.1,
50
+ max_tokens=4096,
51
  stop=[
52
  "",
53
  "",
 
65
 
66
  history = ["init", input_prompt]
67
 
68
+
69
  # Function to predict using the Vision Model and interact with LLM
70
  def predict(img):
71
  img = PILImage.create(img)