QiyuWu commited on
Commit
cebd036
·
verified ·
1 Parent(s): 54926e7

Upload 3 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. app.py +82 -0
  3. dpo-internlm2-1_8b.Q4_K_M.gguf +3 -0
  4. requirements.txt +8 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ dpo-internlm2-1_8b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import copy
import os

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama, LlamaCache
8
# Load the local GGUF-quantized model once at import time.
# NOTE(review): the original passed `batch_size=1024` and `cache=True` to
# Llama(), but Llama.__init__ takes `n_batch` (not `batch_size`) and has no
# `cache` parameter — both were silently absorbed by **kwargs and had no
# effect. Fixed below: `n_batch` is used, and caching goes through set_cache().
llm = Llama(
    model_path="./dpo-internlm2-1_8b.Q4_K_M.gguf",
    n_ctx=2048,    # prompt + generation context window, in tokens
    n_threads=2,   # low thread count — presumably a small shared CPU host; confirm
    seed=57199,    # fixed seed for reproducible sampling
    n_batch=1024,  # prompt-processing batch size (was the ignored `batch_size`)
)
# Enable prompt caching (the intent behind the original `cache=True`).
llm.set_cache(LlamaCache())
16
+
17
+
18
def generate_text(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion from the local llama.cpp model.

    Builds a Llama-2-style ``[INST]``/``<<SYS>>`` prompt from the system
    message, the prior (user, assistant) turns in ``history``, and the new
    ``message``, then yields the cumulative generated text chunk by chunk so
    Gradio's ChatInterface renders it as a live stream.

    Args:
        message: The new user message.
        history: Prior ``(user_message, bot_reply)`` pairs from Gradio.
        system_message: System prompt placed inside the ``<<SYS>>`` block.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The full response generated so far (ChatInterface replaces the
        displayed text with each yielded value rather than appending).
    """
    input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
    for user_turn, bot_turn in history:
        input_prompt += f"{user_turn} [/INST] {bot_turn} </s><s> [INST] "
    input_prompt += f"{message} [/INST] "

    output = llm(
        input_prompt,
        temperature=temperature,
        top_p=top_p,
        top_k=40,
        repeat_penalty=1.2,
        max_tokens=max_tokens,
        # [UNUSED_TOKEN_145] appears to be InternLM2's end-of-assistant
        # token; the template markers guard against the model echoing them.
        stop=["[/INST]", "[UNUSED_TOKEN_145]", "<<SYS>>"],
        stream=True,
    )

    response = ""
    for chunk in output:
        # Each streamed chunk dict is consumed immediately and never kept,
        # so the original per-chunk copy.deepcopy was unnecessary overhead.
        response += chunk["choices"][0]["text"]
        yield response
48
+
49
+
50
# Chat UI wired to the streaming generator above. The widgets listed in
# `additional_inputs` are passed to generate_text positionally, in order,
# after (message, history).
_example_prompts = [
    ['How to setup a human base on Mars? Give short answer.'],
    ['Explain theory of relativity to me like I’m 8 years old.'],
    ['What is 9,000 * 9,000?'],
    ['Write a pun-filled happy birthday message to my friend Alex.'],
    ['Justify why a penguin might make a good king of the jungle.'],
]

_extra_controls = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.3, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.90, step=0.05, label="Top-p (nucleus sampling)"),
]

demo = gr.ChatInterface(
    generate_text,
    title="DPO-Internlm2-1_8B",
    description="Running LLM with llama-cpp-python",
    theme=gr.themes.Soft(),
    examples=_example_prompts,
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
    additional_inputs=_extra_controls,
)
79
+
80
+
81
# Start the Gradio server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
dpo-internlm2-1_8b.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:578eef594160235647379db7d7b48581ae23b375ab6a06736a22d59b37c76683
3
+ size 1172363936
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ sentencepiece
4
+ einops
5
+ mdtex2html
6
+ llama-cpp-python==0.2.69
7
+ gradio==4.28.3
8
+ huggingface_hub==0.22.2