TobDeBer committed
Commit dbc454e · verified · 1 Parent(s): 7a75a1b

prompt format

Files changed (1): app.py (+15 -1)
app.py CHANGED
@@ -27,11 +27,25 @@ pipe = Llama(
     model_path=model_path
 )
 
+def format_prompt(message: str, history: List[List[str]]):
+    prompt = "<|begin_of_text|>"  # Start with the begin of text token
+    prompt += "<|im_start|>system\n<|im_end|>\n"  # Assuming there's no system prompt here, just adding system role tags
+
+    for user_msg, assistant_msg in history:
+        prompt += f"<|im_start|>user\n{user_msg}<|im_end|>\n"
+        if assistant_msg:
+            prompt += f"<|im_start|>assistant\n{assistant_msg}<|im_end|>\n"
+
+    prompt += f"<|im_start|>user\n{message}<|im_end|>\n"
+    prompt += "<|im_start|>assistant\n"  # Start of the assistant's part
+
+    return prompt
+
 def predict(message: str, history: List[List[str]], max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS, progress=gr.Progress()):
     if not message:
         return "", history, ""
 
-    prompt = message
+    prompt = format_prompt(message, history)
     history.append([message, ""])
 
     # Initialize reply for this round
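For reference, a minimal, self-contained sketch of what the new format_prompt produces for a one-turn history. The function body mirrors the diff above; the history contents and the final user message are made-up illustrations, not part of the commit.

    from typing import List

    def format_prompt(message: str, history: List[List[str]]) -> str:
        # Same logic as the diff above, reproduced so this sketch runs on its own.
        prompt = "<|begin_of_text|>"
        prompt += "<|im_start|>system\n<|im_end|>\n"
        for user_msg, assistant_msg in history:
            prompt += f"<|im_start|>user\n{user_msg}<|im_end|>\n"
            if assistant_msg:
                prompt += f"<|im_start|>assistant\n{assistant_msg}<|im_end|>\n"
        prompt += f"<|im_start|>user\n{message}<|im_end|>\n"
        prompt += "<|im_start|>assistant\n"
        return prompt

    # One prior exchange plus a new user message (contents are hypothetical).
    history = [["Hello", "Hi, how can I help?"]]
    print(format_prompt("What does llama.cpp do?", history))
    # Prints:
    # <|begin_of_text|><|im_start|>system
    # <|im_end|>
    # <|im_start|>user
    # Hello<|im_end|>
    # <|im_start|>assistant
    # Hi, how can I help?<|im_end|>
    # <|im_start|>user
    # What does llama.cpp do?<|im_end|>
    # <|im_start|>assistant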