Upload 13 files
- ChatHaruhi/BaseDB.py +27 -0
- ChatHaruhi/BaseLLM.py +56 -0
- ChatHaruhi/ChatGLM2GPT.py +69 -0
- ChatHaruhi/ChatHaruhi.py +272 -0
- ChatHaruhi/ChromaDB.py +61 -0
- ChatHaruhi/GLMPro.py +90 -0
- ChatHaruhi/LangChainGPT.py +66 -0
- ChatHaruhi/PrintLLM.py +61 -0
- ChatHaruhi/SparkApi.py +139 -0
- ChatHaruhi/SparkGPT.py +54 -0
- ChatHaruhi/__init__.py +26 -0
- ChatHaruhi/role_name_to_file.py +67 -0
- ChatHaruhi/utils.py +180 -0
ChatHaruhi/BaseDB.py
ADDED
@@ -0,0 +1,27 @@
# BaseDB.py

from abc import ABC, abstractmethod

class BaseDB(ABC):

    @abstractmethod
    def init_db(self):
        pass

    @abstractmethod
    def save(self, file_path):
        pass

    @abstractmethod
    def load(self, file_path):
        pass

    @abstractmethod
    def search(self, vector, n_results):
        pass

    @abstractmethod
    def init_from_docs(self, vectors, documents):
        pass
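For illustration only (not part of this upload), a minimal in-memory backend sketch showing the contract a BaseDB subclass has to satisfy; the class name and the brute-force search below are hypothetical:

# Hypothetical example: a tiny in-memory BaseDB backend, for illustration only.
import pickle
from ChatHaruhi.BaseDB import BaseDB

class InMemoryDB(BaseDB):
    def init_db(self):
        self.vectors, self.documents = [], []

    def save(self, file_path):
        with open(file_path, 'wb') as f:
            pickle.dump((self.vectors, self.documents), f)

    def load(self, file_path):
        with open(file_path, 'rb') as f:
            self.vectors, self.documents = pickle.load(f)

    def search(self, vector, n_results):
        # naive cosine-similarity ranking, mirroring the return shape of ChromaDB.search
        def cos(a, b):
            dot = sum(x * y for x, y in zip(a, b))
            na = sum(x * x for x in a) ** 0.5
            nb = sum(y * y for y in b) ** 0.5
            return dot / (na * nb + 1e-8)
        scored = sorted(zip(self.vectors, self.documents), key=lambda p: -cos(p[0], vector))
        return [doc for _, doc in scored[:n_results]]

    def init_from_docs(self, vectors, documents):
        self.init_db()
        self.vectors, self.documents = list(vectors), list(documents)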
ChatHaruhi/BaseLLM.py
ADDED
@@ -0,0 +1,56 @@
# ChatHaruhi: Reviving Anime Character in Reality via Large Language Model
#
# ChatHaruhi 2.0, built by Cheng Li and Weishi Mi
#

#
# Weishi Mi is a second-year graduate student at Tsinghua University, majoring in computer science.
# Weishi Mi is pursuing a job or a PhD position, and will be available next year.
#
# homepage https://github.com/LC1332/Chat-Haruhi-Suzumiya
#
# ChatHaruhi is a chatbot that can revive anime characters in reality.
# The 2.0 version was built by Cheng Li and Weishi Mi.
#
# Please cite our paper if you use this code for research:
#
# @misc{li2023chatharuhi,
#       title={ChatHaruhi: Reviving Anime Character in Reality via Large Language Model},
#       author={Cheng Li and Ziang Leng and Chenxi Yan and Junyi Shen and Hao Wang and Weishi MI and Yaying Fei and Xiaoyang Feng and Song Yan and HaoSheng Wang and Linkang Zhan and Yaokai Jia and Pingyu Wu and Haozhen Sun},
#       year={2023},
#       eprint={2308.09597},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }

from abc import ABC, abstractmethod

class BaseLLM(ABC):

    def __init__(self):
        pass

    @abstractmethod
    def initialize_message(self):
        pass

    @abstractmethod
    def ai_message(self, payload):
        pass

    @abstractmethod
    def system_message(self, payload):
        pass

    @abstractmethod
    def user_message(self, payload):
        pass

    @abstractmethod
    def get_response(self):
        pass

    @abstractmethod
    def print_prompt(self):
        pass
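Every chat backend in this upload (LangChainGPT, SparkGPT, GLMPro, ChatGLM2GPT, PrintLLM) implements this interface. As a hedged illustration, a hypothetical echo backend only needs the seven methods above:

# Hypothetical illustration only: an echo backend that satisfies the BaseLLM contract.
from ChatHaruhi.BaseLLM import BaseLLM

class EchoLLM(BaseLLM):
    def __init__(self):
        super().__init__()
        self.messages = []

    def initialize_message(self):
        self.messages = []

    def ai_message(self, payload):
        self.messages.append(("ai", payload))

    def system_message(self, payload):
        self.messages.append(("system", payload))

    def user_message(self, payload):
        self.messages.append(("user", payload))

    def get_response(self):
        # just echo the last user message instead of calling a real model
        users = [m for role, m in self.messages if role == "user"]
        return users[-1] if users else ""

    def print_prompt(self):
        for role, message in self.messages:
            print(f"{role}: {message}")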
ChatHaruhi/ChatGLM2GPT.py
ADDED
@@ -0,0 +1,69 @@
import os
from transformers import AutoTokenizer, AutoModel
from peft import LoraConfig, get_peft_model
from peft import PeftModel, PeftConfig
from .BaseLLM import BaseLLM
import torch

tokenizer_GLM = None
model_GLM = None

def initialize_GLM2LORA():
    global tokenizer_GLM
    global model_GLM

    if tokenizer_GLM is None and model_GLM is None:
        tokenizer_GLM = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
        model_GLM = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()

        config = LoraConfig(
            r=16,
            lora_alpha=32,
            inference_mode=True,
            lora_dropout=0.05,
            # bias="none",
            task_type="CAUSAL_LM"
        )

        model_GLM = PeftModel.from_pretrained(model_GLM, "silk-road/Chat-Haruhi-Fusion_B")
    return model_GLM, tokenizer_GLM

def GLM_tokenizer(text):
    return len(tokenizer_GLM.encode(text))

class ChatGLM2GPT(BaseLLM):
    def __init__(self, model="haruhi-fusion"):
        super(ChatGLM2GPT, self).__init__()
        if model == "glm2-6b":
            self.tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
            self.model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
        elif model == "haruhi-fusion":
            # load the LoRA-fused Haruhi checkpoint on top of ChatGLM2-6B
            self.model, self.tokenizer = initialize_GLM2LORA()
        else:
            raise Exception("Unknown GLM model")
        self.messages = ""

    def initialize_message(self):
        self.messages = ""

    def ai_message(self, payload):
        self.messages = self.messages + "\n " + payload

    def system_message(self, payload):
        self.messages = self.messages + "\n " + payload

    def user_message(self, payload):
        self.messages = self.messages + "\n " + payload

    def get_response(self):
        with torch.no_grad():
            response, history = self.model.chat(self.tokenizer, self.messages, history=[])
            # print(response)
        return response

    def print_prompt(self):
        print(type(self.messages))
        print(self.messages)
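A minimal local-inference sketch, assuming a CUDA GPU and access to the THUDM/chatglm2-6b and silk-road/Chat-Haruhi-Fusion_B weights; the prompt strings are placeholders:

# Sketch only: requires a CUDA GPU plus the chatglm2-6b and Chat-Haruhi-Fusion_B weights.
from ChatHaruhi.ChatGLM2GPT import ChatGLM2GPT

llm = ChatGLM2GPT(model="haruhi-fusion")
llm.initialize_message()
llm.system_message("你扮演凉宫春日")                 # role-play instruction (placeholder)
llm.user_message("阿虚:「今天的社团活动是什么?」")   # placeholder user turn
print(llm.get_response())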
ChatHaruhi/ChatHaruhi.py
ADDED
@@ -0,0 +1,272 @@
from .ChromaDB import ChromaDB
import os

from .utils import luotuo_openai_embedding, tiktokenizer

from .utils import response_postprocess

class ChatHaruhi:

    def __init__(self, system_prompt=None,
                 role_name=None,
                 story_db=None, story_text_folder=None,
                 llm='openai',
                 embedding='luotuo_openai',
                 max_len_story=None, max_len_history=None,
                 verbose=False):
        super(ChatHaruhi, self).__init__()
        self.verbose = verbose

        # constants
        self.story_prefix_prompt = "Classic scenes for the role are as follows:\n"
        self.k_search = 19
        self.narrator = ['旁白', '', 'scene', 'Scene', 'narrator', 'Narrator']
        self.dialogue_divide_token = '\n###\n'
        self.dialogue_bra_token = '「'
        self.dialogue_ket_token = '」'

        if system_prompt:
            self.system_prompt = self.check_system_prompt(system_prompt)

        # TODO: embedding should be defined separately, so refactor this part later
        if llm == 'openai':
            # self.llm = LangChainGPT()
            self.llm, self.tokenizer = self.get_models('openai')
        elif llm == 'debug':
            self.llm, self.tokenizer = self.get_models('debug')
        elif llm == 'spark':
            self.llm, self.tokenizer = self.get_models('spark')
        elif llm == 'GLMPro':
            self.llm, self.tokenizer = self.get_models('GLMPro')
        elif llm == 'ChatGLM2GPT':
            self.llm, self.tokenizer = self.get_models('ChatGLM2GPT')
            self.story_prefix_prompt = '\n'
        else:
            print(f'warning! undefined llm {llm}, use openai instead.')
            self.llm, self.tokenizer = self.get_models('openai')

        if embedding == 'luotuo_openai':
            self.embedding = luotuo_openai_embedding
        else:
            print(f'warning! undefined embedding {embedding}, use luotuo_openai instead.')
            self.embedding = luotuo_openai_embedding

        if role_name:
            from .role_name_to_file import get_folder_role_name
            # map role_name (or a nickname) to its canonical folder_role_name
            role_name, url = get_folder_role_name(role_name)

            unzip_folder = f'./temp_character_folder/temp_{role_name}'
            db_folder = os.path.join(unzip_folder, f'content/{role_name}')
            system_prompt = os.path.join(unzip_folder, 'content/system_prompt.txt')

            if not os.path.exists(unzip_folder):
                # not yet downloaded
                # url = f'https://github.com/LC1332/Haruhi-2-Dev/raw/main/data/character_in_zip/{role_name}.zip'
                import requests, zipfile, io
                r = requests.get(url)
                z = zipfile.ZipFile(io.BytesIO(r.content))
                z.extractall(unzip_folder)

            if self.verbose:
                print(f'loading pre-defined character {role_name}...')

            self.db = ChromaDB()
            self.db.load(db_folder)
            self.system_prompt = self.check_system_prompt(system_prompt)

        elif story_db:
            self.db = ChromaDB()
            self.db.load(story_db)
        elif story_text_folder:
            # print("Building story database from texts...")
            self.db = self.build_story_db(story_text_folder)
        else:
            self.db = None
            print('warning! no story database was set up; neither story_db nor story_text_folder was provided.')
            # raise ValueError("Either story_db or story_text_folder must be provided")

        self.max_len_story, self.max_len_history = self.get_tokenlen_setting('openai')

        if max_len_history is not None:
            self.max_len_history = max_len_history
            # user setting will override default setting

        if max_len_story is not None:
            self.max_len_story = max_len_story
            # user setting will override default setting

        self.dialogue_history = []

    def check_system_prompt(self, system_prompt):
        # if system_prompt ends with .txt, read the file with utf-8
        # else, return the string directly
        if system_prompt.endswith('.txt'):
            with open(system_prompt, 'r', encoding='utf-8') as f:
                return f.read()
        else:
            return system_prompt

    def get_models(self, model_name):
        # TODO: if the caller only needs the tokenizer, there is no need to initialize the llm
        # return the combination of llm and tokenizer
        if model_name == 'openai':
            from .LangChainGPT import LangChainGPT
            return (LangChainGPT(), tiktokenizer)
        elif model_name == 'debug':
            from .PrintLLM import PrintLLM
            return (PrintLLM(), tiktokenizer)
        elif model_name == 'spark':
            from .SparkGPT import SparkGPT
            return (SparkGPT(), tiktokenizer)
        elif model_name == 'GLMPro':
            from .GLMPro import GLMPro
            return (GLMPro(), tiktokenizer)
        elif model_name == "ChatGLM2GPT":
            from .ChatGLM2GPT import ChatGLM2GPT, GLM_tokenizer
            return (ChatGLM2GPT(), GLM_tokenizer)
        else:
            print(f'warning! undefined model {model_name}, use openai instead.')
            from .LangChainGPT import LangChainGPT
            return (LangChainGPT(), tiktokenizer)

    def get_tokenlen_setting(self, model_name):
        # return the default token budgets for story and history
        if model_name == 'openai':
            return (1500, 1200)
        else:
            print(f'warning! undefined model {model_name}, use openai instead.')
            return (1500, 1200)

    def build_story_db_from_vec(self, texts, vecs):
        self.db = ChromaDB()
        self.db.init_from_docs(vecs, texts)

    def build_story_db(self, text_folder):
        # read the text folder and extract one embedding vector per file
        db = ChromaDB()

        strs = []

        # scan all txt files in text_folder
        for file in os.listdir(text_folder):
            # if the file name ends with txt
            if file.endswith(".txt"):
                file_path = os.path.join(text_folder, file)
                with open(file_path, 'r', encoding='utf-8') as f:
                    strs.append(f.read())

        if self.verbose:
            print(f'starting extract embedding... for {len(strs)} files')

        vecs = []

        ## TODO: add a unit test for batched embedding
        ## the new embedding code supports list (batch) input
        ## replace the for loop below with the batched version
        ## Luotuo-bert-en has also been released, so openai can be avoided

        for mystr in strs:
            vecs.append(self.embedding(mystr))

        db.init_from_docs(vecs, strs)

        return db

    def save_story_db(self, db_path):
        self.db.save(db_path)

    def chat(self, text, role):
        # add system prompt
        self.llm.initialize_message()
        self.llm.system_message(self.system_prompt)

        # add story
        query = self.get_query_string(text, role)
        self.add_story(query)

        # add history
        self.add_history()

        # add query
        self.llm.user_message(query)

        # get response
        response_raw = self.llm.get_response()

        response = response_postprocess(response_raw, self.dialogue_bra_token, self.dialogue_ket_token)

        # record dialogue history
        self.dialogue_history.append((query, response))

        return response

    def get_query_string(self, text, role):
        if role in self.narrator:
            return role + ":" + text
        else:
            return f"{role}:{self.dialogue_bra_token}{text}{self.dialogue_ket_token}"

    def add_story(self, query):
        if self.db is None:
            return

        query_vec = self.embedding(query)

        stories = self.db.search(query_vec, self.k_search)

        story_string = self.story_prefix_prompt
        sum_story_token = self.tokenizer(story_string)

        for story in stories:
            story_token = self.tokenizer(story) + self.tokenizer(self.dialogue_divide_token)
            if sum_story_token + story_token > self.max_len_story:
                break
            else:
                sum_story_token += story_token
                story_string += story + self.dialogue_divide_token

        self.llm.user_message(story_string)

    def add_history(self):
        if len(self.dialogue_history) == 0:
            return

        sum_history_token = 0
        flag = 0
        for query, response in reversed(self.dialogue_history):
            current_count = 0
            if query is not None:
                current_count += self.tokenizer(query)
            if response is not None:
                current_count += self.tokenizer(response)
            sum_history_token += current_count
            if sum_history_token > self.max_len_history:
                break
            else:
                flag += 1

        if flag == 0:
            print('warning! no history added. the last dialogue is too long.')
            return  # avoid dialogue_history[-0:], which would add the whole history

        for (query, response) in self.dialogue_history[-flag:]:
            if query is not None:
                self.llm.user_message(query)
            if response is not None:
                self.llm.ai_message(response)
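A minimal usage sketch for the class above, assuming OPENAI_API_KEY is set (the default llm goes through OpenAI) and network access to download the character zip; the key value below is a placeholder:

# Sketch only: assumes OPENAI_API_KEY is exported and the character zip can be fetched.
import os
from ChatHaruhi import ChatHaruhi

os.environ["OPENAI_API_KEY"] = "sk-..."   # placeholder, set your own key

chatbot = ChatHaruhi(role_name="haruhi", llm="openai", verbose=True)

# chat(text, role): role is the speaker's name, text is what that speaker says
reply = chatbot.chat(role="阿虚", text="春日,今天的SOS团有什么活动?")
print(reply)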
ChatHaruhi/ChromaDB.py
ADDED
@@ -0,0 +1,61 @@
import chromadb
from .BaseDB import BaseDB
import random
import string
import os

class ChromaDB(BaseDB):

    def __init__(self):
        self.client = None
        self.collection = None
        self.path = None

    def init_db(self):
        if self.client is not None:
            print('ChromaDB has already been initialized')
            return

        folder_name = ''

        while os.path.exists(folder_name) or folder_name == '':
            # try to create a folder named tempdb_<random string> that does not exist yet
            folder_name = "tempdb_" + ''.join(random.sample(string.ascii_letters + string.digits, 8))

        self.path = folder_name
        self.client = chromadb.PersistentClient(path=folder_name)

        self.collection = self.client.get_or_create_collection("search")

    def save(self, file_path):
        if file_path != self.path:
            # copy all files in self.path to file_path, with overwrite
            os.system("cp -r " + self.path + " " + file_path)
            previous_path = self.path
            self.path = file_path
            self.client = chromadb.PersistentClient(path=file_path)
            # remove the previous path if it starts with tempdb
            if previous_path.startswith("tempdb"):
                os.system("rm -rf " + previous_path)

    def load(self, file_path):
        self.path = file_path
        self.client = chromadb.PersistentClient(path=file_path)
        self.collection = self.client.get_collection("search")

    def search(self, vector, n_results):
        results = self.collection.query(query_embeddings=[vector], n_results=n_results)
        return results['documents'][0]

    def init_from_docs(self, vectors, documents):
        if self.client is None:
            self.init_db()

        ids = []
        for i, doc in enumerate(documents):
            # use the index plus the first four characters to build a unique, readable id
            first_four_chars = doc[:min(4, len(doc))]
            ids.append(str(i) + "_" + first_four_chars)
        self.collection.add(embeddings=vectors, documents=documents, ids=ids)
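A small end-to-end sketch of this wrapper; the three-dimensional vectors are made-up placeholders standing in for real embeddings:

# Sketch only: toy 3-dimensional vectors stand in for real embeddings.
from ChatHaruhi.ChromaDB import ChromaDB

docs = ["春日:「我对普通的人类没有兴趣!」", "阿虚:「又开始了……」"]
vecs = [[0.1, 0.2, 0.3], [0.3, 0.1, 0.0]]

db = ChromaDB()
db.init_from_docs(vecs, docs)          # creates a tempdb_<random> folder on disk
print(db.search([0.1, 0.2, 0.25], n_results=1))
db.save("./my_story_db")               # persist; later: ChromaDB().load("./my_story_db")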
ChatHaruhi/GLMPro.py
ADDED
@@ -0,0 +1,90 @@
from .BaseLLM import BaseLLM
import os

zhipu_api = os.environ['ZHIPU_API']

import zhipuai
import time

class GLMPro(BaseLLM):
    def __init__(self, model="chatglm_pro", verbose=False):
        super(GLMPro, self).__init__()

        zhipuai.api_key = zhipu_api

        self.verbose = verbose

        self.model_name = model

        self.prompts = []

        if self.verbose == True:
            print('model name, ', self.model_name)
            if len(zhipu_api) > 8:
                print('found apikey ', zhipu_api[:4], '****', zhipu_api[-4:])
            else:
                print('found apikey but too short')

    def initialize_message(self):
        self.prompts = []

    def ai_message(self, payload):
        self.prompts.append({"role": "assistant", "content": payload})

    def system_message(self, payload):
        self.prompts.append({"role": "user", "content": payload})

    def user_message(self, payload):
        self.prompts.append({"role": "user", "content": payload})

    def get_response(self):
        zhipuai.api_key = zhipu_api
        max_test_name = 5      # number of submit attempts
        sleep_interval = 3

        request_id = None

        # try to submit the asynchronous request until it succeeds
        for test_time in range(max_test_name):
            response = zhipuai.model_api.async_invoke(
                model=self.model_name,
                prompt=self.prompts,
                temperature=0)
            if response['success'] == True:
                request_id = response['data']['task_id']

                if self.verbose == True:
                    print('submit request, id = ', request_id)
                break
            else:
                print('submit GLM request failed, retrying...')
            time.sleep(sleep_interval)

        if request_id:
            # poll for the result until it succeeds
            for test_time in range(2 * max_test_name):
                result = zhipuai.model_api.query_async_invoke_result(request_id)
                if result['code'] == 200 and result['data']['task_status'] == 'SUCCESS':
                    if self.verbose == True:
                        print('get GLM response success')

                    choices = result['data']['choices']
                    if len(choices) > 0:
                        return choices[-1]['content'].strip("\"'")

                # otherwise the task has not finished yet
                if self.verbose == True:
                    print('get GLM response failed, retrying...')
                # sleep before the next attempt
                time.sleep(sleep_interval)
        else:
            print('submit GLM request failed, please check your api key and model name')
        return ''

    def print_prompt(self):
        for message in self.prompts:
            print(f"{message['role']}: {message['content']}")
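A usage sketch, assuming a valid ZHIPU_API key; the module reads os.environ['ZHIPU_API'] at import time, so the variable must exist before the import (the key and prompts below are placeholders):

# Sketch only: ZHIPU_API must be set before importing, because GLMPro.py reads it at module load.
import os
os.environ.setdefault("ZHIPU_API", "your-zhipu-key")   # placeholder

from ChatHaruhi.GLMPro import GLMPro

llm = GLMPro(verbose=True)
llm.initialize_message()
llm.system_message("你扮演凉宫春日")               # placeholder prompts
llm.user_message("阿虚:「你在干什么?」")
print(llm.get_response())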
ChatHaruhi/LangChainGPT.py
ADDED
@@ -0,0 +1,66 @@
# ChatHaruhi: Reviving Anime Character in Reality via Large Language Model
#
# ChatHaruhi 2.0, built by Cheng Li and Weishi Mi
#

#
# Weishi Mi is a second-year graduate student at Tsinghua University, majoring in computer science.
# Weishi Mi is pursuing a job or a PhD position, and will be available next year.
#
# homepage https://github.com/LC1332/Chat-Haruhi-Suzumiya
#
# ChatHaruhi is a chatbot that can revive anime characters in reality.
# The 2.0 version was built by Cheng Li and Weishi Mi.
#
# Please cite our paper if you use this code for research:
#
# @misc{li2023chatharuhi,
#       title={ChatHaruhi: Reviving Anime Character in Reality via Large Language Model},
#       author={Cheng Li and Ziang Leng and Chenxi Yan and Junyi Shen and Hao Wang and Weishi MI and Yaying Fei and Xiaoyang Feng and Song Yan and HaoSheng Wang and Linkang Zhan and Yaokai Jia and Pingyu Wu and Haozhen Sun},
#       year={2023},
#       eprint={2308.09597},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }


from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)
from .BaseLLM import BaseLLM

class LangChainGPT(BaseLLM):

    def __init__(self, model="gpt-3.5-turbo"):
        super(LangChainGPT, self).__init__()
        self.chat = ChatOpenAI(model=model)
        self.messages = []

    def initialize_message(self):
        self.messages = []

    def ai_message(self, payload):
        self.messages.append(AIMessage(content=payload))

    def system_message(self, payload):
        self.messages.append(SystemMessage(content=payload))

    def user_message(self, payload):
        self.messages.append(HumanMessage(content=payload))

    def get_response(self):
        response = self.chat(self.messages)
        return response.content

    def print_prompt(self):
        for message in self.messages:
            print(message)
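A short usage sketch, assuming OPENAI_API_KEY is set in the environment (langchain's ChatOpenAI picks it up automatically); the prompts are placeholders:

# Sketch only: assumes OPENAI_API_KEY is available in the environment.
from ChatHaruhi.LangChainGPT import LangChainGPT

llm = LangChainGPT(model="gpt-3.5-turbo")
llm.initialize_message()
llm.system_message("You are Haruhi Suzumiya. Answer in character.")   # placeholder prompts
llm.user_message("Kyon:「What is the SOS Brigade doing today?」")
print(llm.get_response())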
ChatHaruhi/PrintLLM.py
ADDED
@@ -0,0 +1,61 @@
# ChatHaruhi: Reviving Anime Character in Reality via Large Language Model
#
# ChatHaruhi 2.0, built by Cheng Li and Weishi Mi
#

#
# Weishi Mi is a second-year graduate student at Tsinghua University, majoring in computer science.
# Weishi Mi is pursuing a job or a PhD position, and will be available next year.
#
# homepage https://github.com/LC1332/Chat-Haruhi-Suzumiya
#
# ChatHaruhi is a chatbot that can revive anime characters in reality.
# The 2.0 version was built by Cheng Li and Weishi Mi.
#
# Please cite our paper if you use this code for research:
#
# @misc{li2023chatharuhi,
#       title={ChatHaruhi: Reviving Anime Character in Reality via Large Language Model},
#       author={Cheng Li and Ziang Leng and Chenxi Yan and Junyi Shen and Hao Wang and Weishi MI and Yaying Fei and Xiaoyang Feng and Song Yan and HaoSheng Wang and Linkang Zhan and Yaokai Jia and Pingyu Wu and Haozhen Sun},
#       year={2023},
#       eprint={2308.09597},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }
#
# This PrintLLM.py is for debugging without any real-running LLM,
# so you can see the full prompt and copy it into GPT or Claude to debug.
#

from .BaseLLM import BaseLLM

class PrintLLM(BaseLLM):

    def __init__(self):
        self.messages = []
        self.messages.append("Notice: this is a print LLM for debugging.")
        self.messages.append("You can also copy the prompt into GPT or Claude for debugging.")

    def initialize_message(self):
        self.messages = []
        self.messages.append("Notice: this is a print LLM for debugging.")
        self.messages.append("You can also copy the prompt into GPT or Claude for debugging.")

    def ai_message(self, payload):
        self.messages.append("AI: \n" + payload)

    def system_message(self, payload):
        self.messages.append("System: \n" + payload)

    def user_message(self, payload):
        self.messages.append("User: \n" + payload)

    def get_response(self):
        for message in self.messages:
            print(message)
        response = input("Please input your response: ")
        return response

    def print_prompt(self):
        for message in self.messages:
            print(message)
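Because ChatHaruhi.get_models('debug') returns this backend, the whole prompt-assembly pipeline can be dry-run without any API key; a sketch:

# Sketch only: no API key needed; the assembled prompt is printed and you type the reply by hand.
from ChatHaruhi import ChatHaruhi

# no role_name / story_db / story_text_folder, so only the prompt assembly is exercised
chatbot = ChatHaruhi(system_prompt="你扮演凉宫春日", llm="debug")
reply = chatbot.chat(role="阿虚", text="春日,放学后去哪?")
print(reply)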
ChatHaruhi/SparkApi.py
ADDED
@@ -0,0 +1,139 @@
# WebSocket interface provided by iFLYTEK for interacting with the Spark (星火) model

import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
from urllib.parse import urlparse
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time

import websocket  # uses websocket_client
answer = ""

class Ws_Param(object):
    # initialization
    def __init__(self, APPID, APIKey, APISecret, Spark_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        self.host = urlparse(Spark_url).netloc
        self.path = urlparse(Spark_url).path
        self.Spark_url = Spark_url

    # build the signed websocket url
    def create_url(self):
        # RFC1123-formatted timestamp
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))

        # assemble the string to sign
        signature_origin = "host: " + self.host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + self.path + " HTTP/1.1"

        # sign it with hmac-sha256
        signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()

        signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')

        authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'

        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')

        # collect the authentication parameters into a dict
        v = {
            "authorization": authorization,
            "date": date,
            "host": self.host
        }
        # append the authentication parameters to build the url
        url = self.Spark_url + '?' + urlencode(v)
        # when debugging, print the url built here and compare it with the one produced by the official demo for the same parameters
        return url


# websocket error handler
def on_error(ws, error):
    print("### error:", error)


# websocket close handler
def on_close(ws, one, two):
    print(" ")


# websocket open handler
def on_open(ws):
    thread.start_new_thread(run, (ws,))


def run(ws, *args):
    data = json.dumps(gen_params(appid=ws.appid, domain=ws.domain, question=ws.question))
    ws.send(data)


# websocket message handler
def on_message(ws, message):
    # print(message)
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        print(f'request error: {code}, {data}')
        ws.close()
    else:
        choices = data["payload"]["choices"]
        status = choices["status"]
        content = choices["text"][0]["content"]
        # print(content, end="")
        global answer
        answer += content
        # print(1)
        if status == 2:
            ws.close()


def gen_params(appid, domain, question):
    """
    Build the request parameters from the appid and the user's question.
    """
    data = {
        "header": {
            "app_id": appid,
            "uid": "1234"
        },
        "parameter": {
            "chat": {
                "domain": domain,
                "random_threshold": 0.5,
                "max_tokens": 2048,
                "auditing": "default"
            }
        },
        "payload": {
            "message": {
                "text": question
            }
        }
    }
    return data


def main(appid, api_key, api_secret, Spark_url, domain, question):
    # print("Spark:")
    wsParam = Ws_Param(appid, api_key, api_secret, Spark_url)
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()
    ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close, on_open=on_open)
    ws.appid = appid
    ws.question = question
    ws.domain = domain
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
ChatHaruhi/SparkGPT.py
ADDED
@@ -0,0 +1,54 @@
# SparkGPT.py
from . import SparkApi
# the following credentials are read from environment variables
import os

appid = os.environ['APPID']
api_secret = os.environ['APISecret']
api_key = os.environ['APIKey']


from .BaseLLM import BaseLLM


class SparkGPT(BaseLLM):

    def __init__(self, model="Spark2.0"):
        super(SparkGPT, self).__init__()
        if model == "Spark2.0":
            self.domain = "generalv2"    # v2.0 model
            self.Spark_url = "ws://spark-api.xf-yun.com/v2.1/chat"  # v2.0 endpoint
        elif model == "Spark1.5":
            self.domain = "general"      # v1.5 model
            self.Spark_url = "ws://spark-api.xf-yun.com/v1.1/chat"  # v1.5 endpoint
        else:
            raise Exception("Unknown Spark model")
        # SparkApi.answer = ""
        self.messages = ''

    def initialize_message(self):
        self.messages = ''

    def ai_message(self, payload):
        self.messages = self.messages + "AI: " + payload

    def system_message(self, payload):
        self.messages = self.messages + "System: " + payload

    def user_message(self, payload):
        self.messages = self.messages + "User: " + payload

    def get_response(self):
        # question = checklen(getText("user", Input))
        message_json = [{"role": "user", "content": self.messages}]
        SparkApi.answer = ""
        SparkApi.main(appid, api_key, api_secret, self.Spark_url, self.domain, message_json)
        return SparkApi.answer

    def print_prompt(self):
        print(type(self.messages))
        print(self.messages)
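A usage sketch, assuming valid iFLYTEK credentials; APPID, APISecret and APIKey are read from the environment when the module is imported, so they must be set first (the values below are placeholders):

# Sketch only: APPID / APISecret / APIKey must exist in the environment before this import.
import os
os.environ.setdefault("APPID", "your-appid")          # placeholders
os.environ.setdefault("APISecret", "your-api-secret")
os.environ.setdefault("APIKey", "your-api-key")

from ChatHaruhi.SparkGPT import SparkGPT

llm = SparkGPT(model="Spark2.0")
llm.initialize_message()
llm.system_message("你扮演凉宫春日")           # placeholder prompts
llm.user_message("阿虚:「社团今天干什么?」")
print(llm.get_response())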
ChatHaruhi/__init__.py
ADDED
@@ -0,0 +1,26 @@
# ChatHaruhi: Reviving Anime Character in Reality via Large Language Model
#
# ChatHaruhi 2.0, built by Cheng Li and Weishi Mi
#

#
# Weishi Mi is a second-year graduate student at Tsinghua University, majoring in computer science.
# Weishi Mi is pursuing a job or a PhD position, and will be available next year.
#
# homepage https://github.com/LC1332/Chat-Haruhi-Suzumiya
#
# ChatHaruhi is a chatbot that can revive anime characters in reality.
# The 2.0 version was built by Cheng Li and Weishi Mi.
#
# Please cite our paper if you use this code for research:
#
# @misc{li2023chatharuhi,
#       title={ChatHaruhi: Reviving Anime Character in Reality via Large Language Model},
#       author={Cheng Li and Ziang Leng and Chenxi Yan and Junyi Shen and Hao Wang and Weishi MI and Yaying Fei and Xiaoyang Feng and Song Yan and HaoSheng Wang and Linkang Zhan and Yaokai Jia and Pingyu Wu and Haozhen Sun},
#       year={2023},
#       eprint={2308.09597},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }

from .ChatHaruhi import ChatHaruhi
ChatHaruhi/role_name_to_file.py
ADDED
@@ -0,0 +1,67 @@
# ChatHaruhi: Reviving Anime Character in Reality via Large Language Model
#
# ChatHaruhi 2.0, built by Cheng Li and Weishi Mi
#

#
# Weishi Mi is a second-year graduate student at Tsinghua University, majoring in computer science.
# Weishi Mi is pursuing a job or a PhD position, and will be available next year.
#
# homepage https://github.com/LC1332/Chat-Haruhi-Suzumiya
#
# ChatHaruhi is a chatbot that can revive anime characters in reality.
# The 2.0 version was built by Cheng Li and Weishi Mi.
#
# Please cite our paper if you use this code for research:
#
# @misc{li2023chatharuhi,
#       title={ChatHaruhi: Reviving Anime Character in Reality via Large Language Model},
#       author={Cheng Li and Ziang Leng and Chenxi Yan and Junyi Shen and Hao Wang and Weishi MI and Yaying Fei and Xiaoyang Feng and Song Yan and HaoSheng Wang and Linkang Zhan and Yaokai Jia and Pingyu Wu and Haozhen Sun},
#       year={2023},
#       eprint={2308.09597},
#       archivePrefix={arXiv},
#       primaryClass={cs.CL}
# }
#
# If you attempt to add a new character, please add the role name here.
#

role_name_Haruhiu = {'汤师爷': 'tangshiye', 'tangshiye': 'tangshiye', 'Tangshiye': 'tangshiye',
                     '慕容复': 'murongfu', 'murongfu': 'murongfu', 'Murongfu': 'murongfu',
                     '李云龙': 'liyunlong', 'liyunlong': 'liyunlong', 'Liyunlong': 'liyunlong',
                     'Luna': 'Luna', '王多鱼': 'wangduoyu', 'wangduoyu': 'wangduoyu',
                     'Wangduoyu': 'wangduoyu', 'Ron': 'Ron', '鸠摩智': 'jiumozhi',
                     'jiumozhi': 'jiumozhi', 'Jiumozhi': 'jiumozhi', 'Snape': 'Snape',
                     '凉宫春日': 'haruhi', 'haruhi': 'haruhi', 'Haruhi': 'haruhi',
                     'Malfoy': 'Malfoy', '虚竹': 'xuzhu', 'xuzhu': 'xuzhu',
                     'Xuzhu': 'xuzhu', '萧峰': 'xiaofeng',
                     'xiaofeng': 'xiaofeng', 'Xiaofeng': 'xiaofeng', '段誉': 'duanyu',
                     'duanyu': 'duanyu', 'Duanyu': 'duanyu', 'Hermione': 'Hermione',
                     'Dumbledore': 'Dumbledore', '王语嫣': 'wangyuyan',
                     'wangyuyan': 'wangyuyan', 'Wangyuyan': 'wangyuyan', 'Harry': 'Harry',
                     'McGonagall': 'McGonagall', '白展堂': 'baizhantang',
                     'baizhantang': 'baizhantang', 'Baizhantang': 'baizhantang',
                     '佟湘玉': 'tongxiangyu', 'tongxiangyu': 'tongxiangyu',
                     'Tongxiangyu': 'tongxiangyu', '郭芙蓉': 'guofurong',
                     'guofurong': 'guofurong', 'Guofurong': 'guofurong', '流浪者': 'wanderer',
                     'wanderer': 'wanderer', 'Wanderer': 'wanderer', '钟离': 'zhongli',
                     'zhongli': 'zhongli', 'Zhongli': 'zhongli', '胡桃': 'hutao', 'hutao': 'hutao',
                     'Hutao': 'hutao', 'Sheldon': 'Sheldon', 'Raj': 'Raj',
                     'Penny': 'Penny', '韦小宝': 'weixiaobao', 'weixiaobao': 'weixiaobao',
                     'Weixiaobao': 'weixiaobao', '乔峰': 'qiaofeng', 'qiaofeng': 'qiaofeng',
                     'Qiaofeng': 'qiaofeng', '神里绫华': 'ayaka', 'ayaka': 'ayaka',
                     'Ayaka': 'ayaka', '雷电将军': 'raidenShogun', 'raidenShogun': 'raidenShogun',
                     'RaidenShogun': 'raidenShogun', '于谦': 'yuqian', 'yuqian': 'yuqian',
                     'Yuqian': 'yuqian', 'Professor McGonagall': 'McGonagall',
                     'Professor Dumbledore': 'Dumbledore'}

# input: role_name (nicknames are also allowed)
# output: folder_role_name and its download url
#         url = f'https://github.com/LC1332/Haruhi-2-Dev/raw/main/data/character_in_zip/{role_name}.zip'
def get_folder_role_name(role_name):
    if role_name in role_name_Haruhiu:
        folder_role_name = role_name_Haruhiu[role_name]
        url = f'https://github.com/LC1332/Haruhi-2-Dev/raw/main/data/character_in_zip/{folder_role_name}.zip'
        return folder_role_name, url
    else:
        print('role_name {} not found, using haruhi as default'.format(role_name))
        return get_folder_role_name('haruhi')
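For example, every alias of a character in role_name_Haruhiu resolves to the same folder name and zip url:

from ChatHaruhi.role_name_to_file import get_folder_role_name

folder_name, url = get_folder_role_name('凉宫春日')
print(folder_name)  # haruhi
print(url)          # https://github.com/LC1332/Haruhi-2-Dev/raw/main/data/character_in_zip/haruhi.zip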
ChatHaruhi/utils.py
ADDED
@@ -0,0 +1,180 @@
from argparse import Namespace

import openai
from transformers import AutoModel, AutoTokenizer
import torch
import random

import tiktoken
import re


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

_luotuo_model = None

_luotuo_model_en = None
_luotuo_en_tokenizer = None

_enc_model = None

def tiktokenizer(text):
    global _enc_model

    if _enc_model is None:
        _enc_model = tiktoken.get_encoding("cl100k_base")

    return len(_enc_model.encode(text))

def response_postprocess(text, dialogue_bra_token='「', dialogue_ket_token='」'):
    lines = text.split('\n')
    new_lines = ""

    first_name = None

    for line in lines:
        line = line.strip(" ")
        match = re.match(r'^(.*?)[::]' + dialogue_bra_token + r"(.*?)" + dialogue_ket_token + r"$", line)

        if match:
            curr_name = match.group(1)
            # print(curr_name)
            if first_name is None:
                first_name = curr_name
                new_lines += (match.group(2))
            else:
                if curr_name != first_name:
                    return first_name + ":" + dialogue_bra_token + new_lines + dialogue_ket_token
                else:
                    new_lines += (match.group(2))
        else:
            if first_name is None:
                return text
            else:
                return first_name + ":" + dialogue_bra_token + new_lines + dialogue_ket_token
    return first_name + ":" + dialogue_bra_token + new_lines + dialogue_ket_token

def download_models():
    print("Downloading Luotuo-Bert")
    # Import our models. The package will take care of downloading the models automatically
    model_args = Namespace(do_mlm=None, pooler_type="cls", temp=0.05, mlp_only_train=False,
                           init_embeddings_model=None)
    model = AutoModel.from_pretrained("silk-road/luotuo-bert-medium", trust_remote_code=True, model_args=model_args).to(
        device)
    print("Luotuo-Bert download complete")
    return model

def get_luotuo_model():
    global _luotuo_model
    if _luotuo_model is None:
        _luotuo_model = download_models()
    return _luotuo_model


def luotuo_embedding(model, texts):
    # Tokenize the texts_source
    tokenizer = AutoTokenizer.from_pretrained("silk-road/luotuo-bert-medium")
    inputs = tokenizer(texts, padding=True, truncation=False, return_tensors="pt")
    inputs = inputs.to(device)
    # Extract the embeddings
    # Get the embeddings
    with torch.no_grad():
        embeddings = model(**inputs, output_hidden_states=True, return_dict=True, sent_emb=True).pooler_output
    return embeddings

def luotuo_en_embedding(texts):
    # this function implemented by Cheng
    global _luotuo_model_en
    global _luotuo_en_tokenizer

    if _luotuo_model_en is None:
        _luotuo_en_tokenizer = AutoTokenizer.from_pretrained("silk-road/luotuo-bert-en")
        _luotuo_model_en = AutoModel.from_pretrained("silk-road/luotuo-bert-en").to(device)

    if _luotuo_en_tokenizer is None:
        _luotuo_en_tokenizer = AutoTokenizer.from_pretrained("silk-road/luotuo-bert-en")

    inputs = _luotuo_en_tokenizer(texts, padding=True, truncation=False, return_tensors="pt")
    inputs = inputs.to(device)

    with torch.no_grad():
        embeddings = _luotuo_model_en(**inputs, output_hidden_states=True, return_dict=True, sent_emb=True).pooler_output

    return embeddings


def get_embedding_for_chinese(model, texts):
    model = model.to(device)
    # accept a single string or a list of strings
    texts = texts if isinstance(texts, list) else [texts]
    # truncate each text to 510 characters
    for i in range(len(texts)):
        if len(texts[i]) > 510:
            texts[i] = texts[i][:510]
    if len(texts) >= 64:
        embeddings = []
        chunk_size = 64
        for i in range(0, len(texts), chunk_size):
            embeddings.append(luotuo_embedding(model, texts[i: i + chunk_size]))
        return torch.cat(embeddings, dim=0)
    else:
        return luotuo_embedding(model, texts)


def is_chinese_or_english(text):
    text = list(text)
    is_chinese, is_english = 0, 0

    for char in text:
        # the character's Unicode value falls in the Chinese character range
        if '\u4e00' <= char <= '\u9fa5':
            is_chinese += 4
        # the character is an English letter (upper or lower case)
        elif ('\u0041' <= char <= '\u005a') or ('\u0061' <= char <= '\u007a'):
            is_english += 1
    if is_chinese >= is_english:
        return "chinese"
    else:
        return "english"


def get_embedding_for_english(text, model="text-embedding-ada-002"):
    text = text.replace("\n", " ")
    return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']

import os

def luotuo_openai_embedding(texts, is_chinese=None):
    """
    when input is chinese, use luotuo_embedding
    when input is english, use openai_embedding
    texts can be a list or a string
    when texts is a list, return a list of embeddings, using batch inference
    when texts is a string, return a single embedding
    """

    openai_key = os.environ.get("OPENAI_API_KEY")

    if isinstance(texts, list):
        index = random.randint(0, len(texts) - 1)
        if openai_key is None or is_chinese_or_english(texts[index]) == "chinese":
            return [embed.cpu().tolist() for embed in get_embedding_for_chinese(get_luotuo_model(), texts)]
        else:
            return [get_embedding_for_english(text) for text in texts]
    else:
        if openai_key is None or is_chinese_or_english(texts) == "chinese":
            return get_embedding_for_chinese(get_luotuo_model(), texts)[0].cpu().tolist()
        else:
            return get_embedding_for_english(texts)


# compute the cosine similarity between two vectors
def get_cosine_similarity(v1, v2):
    v1 = torch.tensor(v1).to(device)
    v2 = torch.tensor(v2).to(device)
    return torch.cosine_similarity(v1, v2, dim=0).item()
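A quick sketch of the embedding helpers above: embed a query and two passages, then rank them with get_cosine_similarity. With no OPENAI_API_KEY set, Chinese text goes through Luotuo-Bert locally; the first call downloads silk-road/luotuo-bert-medium from the Hugging Face Hub:

# Sketch only: first run downloads the silk-road/luotuo-bert-medium weights.
from ChatHaruhi.utils import luotuo_openai_embedding, get_cosine_similarity

passages = ["春日在教室里宣布成立SOS团", "阿虚在食堂吃午饭"]
query = "SOS团是什么时候成立的?"

passage_vecs = luotuo_openai_embedding(passages)   # list in, list of vectors out
query_vec = luotuo_openai_embedding(query)         # string in, single vector out

scores = [get_cosine_similarity(query_vec, v) for v in passage_vecs]
best = passages[scores.index(max(scores))]
print(best)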