# ChatGLM3 / app.py
# (Hugging Face Space file page residue, preserved as comments:
#  kakuguo's picture / Upload 52 files / afd4069 /
#  raw / history blame contribute delete / No virus / 2.47 kB)
# 导包
import gradio as gr
import openai
import tiktoken
# 初始化
openai.api_key = 'none'
openai.api_base="http://localhost:8080/v1"
def count_token(prompt, answer):
    """Count and report the tokens used by a prompt/answer pair.

    Encodes both texts with tiktoken's ``cl100k_base`` encoding,
    prints a usage summary, and returns the combined count.

    Args:
        prompt: The user prompt text.
        answer: The generated answer text.

    Returns:
        int: Total token count (prompt tokens + answer tokens).
        (Previously the total was computed but discarded; returning it
        is backward compatible since no caller uses the return value.)
    """
    encoding = tiktoken.get_encoding("cl100k_base")
    prompt_count = len(encoding.encode(prompt))
    answer_count = len(encoding.encode(answer))
    total_count = prompt_count + answer_count
    # Usage summary (Chinese): "Prompt used %d tokens, answer used %d tokens, %d tokens total".
    print("Prompt消耗 %d Token, 回答消耗 %d Token,总共消耗 %d Token" % (prompt_count, answer_count, total_count))
    return total_count
def concatenate_history(history):
    """Render a chat history as a plain-text transcript.

    Args:
        history: Iterable of ``[user_message, bot_message]`` pairs.

    Returns:
        str: One ``User: .../Bot: ...`` line pair per turn, in order.
    """
    turns = [f"User: {user}\nBot: {bot}\n" for user, bot in history]
    return "".join(turns)
def summarize(text):
    """Summarize a chat transcript via the local OpenAI-compatible API.

    Asks the "SoulChat" model to summarize what User and Bot each said,
    formatted as markdown (the instruction appended to *text* is in Chinese).

    Args:
        text: Transcript text to summarize, or None/empty.

    Returns:
        str: The model's summary, or "" for None/empty input.
    """
    # Fix: the original only checked `text is None`, so an empty transcript
    # still triggered a pointless API round-trip. Skip both None and "".
    if not text:
        return ""
    response = openai.ChatCompletion.create(
        model="SoulChat",  # name of the chat model
        messages=[{"role": "user", "content": text + "\n\n请总结一下User和Bot分别说了什么,并输出为markdown的格式\n"}],
        temperature=0,  # in [0,1]; larger means less deterministic replies
        max_tokens=500  # maximum reply length in tokens
    )
    generated_text = response['choices'][0]['message']['content']
    count_token(text, generated_text)
    # Log line (Chinese): "summary reply: %s".
    print("总结回复:%s" % generated_text)
    return generated_text
# Reply generation
def reply(prompt):
    """Send *prompt* to the local chat endpoint and return the model's answer.

    Args:
        prompt: The user message to send.

    Returns:
        str: The generated reply text.
    """
    completion = openai.ChatCompletion.create(
        model="SoulChat",  # name of the chat model
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # in [0,1]; larger means less deterministic replies
        max_tokens=4096  # maximum reply length in tokens
    )
    answer = completion['choices'][0]['message']['content']
    count_token(prompt, answer)
    print(answer)
    return answer
# Send handler for the Gradio UI.
def send(user_message, history):
    """Handle a click on the send button.

    Args:
        user_message: Text from the input box; falsy values are ignored.
        history: Current chat history as a list of [user, bot] pairs.

    Returns:
        tuple: ('', updated_history) — clears the input box and appends
        the new [user_message, response] turn.
    """
    if not user_message:
        return '', history
    # Fix: the original computed concatenate_history(history) on every call
    # but never used it (the context-augmentation line below is disabled).
    # To re-enable summarized context, restore:
    #   prompt = "上下文是:" + summarize(concatenate_history(history)) + "\n请回答:" + user_message
    prompt = user_message
    response = reply(prompt)
    return '', history + [[user_message, response]]
#定义创建功能
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.TextArea()
send_btn = gr.Button('发送')
send_btn.click(send, inputs=[msg,chatbot], outputs=[msg,chatbot], show_progress=True)
demo.launch()