Tags: Text Generation · Transformers · PyTorch · Chinese · English · llama · text-generation-inference
fireballoon committed
Commit 7c803ff
1 Parent(s): 23bfc11

Upload app.py

Files changed (1)
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
+ from threading import Thread
+
+ import torch
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+
+ model_id = "fireballoon/baichuan-vicuna-chinese-7b"
+ torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+ print("Running on device:", torch_device)
+ print("CPU threads:", torch.get_num_threads())
+
+
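+ # Load the model in float16 on GPU to halve memory use; fall back to full
+ # precision on CPU. use_fast=False selects the slow Python tokenizer.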
+ if torch_device == "cuda":
+     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).cuda()
+ else:
+     model = AutoModelForCausalLM.from_pretrained(model_id)
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+
+
+ def run_generation(history, *args, **kwargs):
+     # Build the Vicuna-style prompt: a fixed system instruction, the previous
+     # turns separated by </s>, then the latest user message.
+     instruction = "A chat between a curious user and an artificial intelligence assistant. " \
+                   "The assistant gives helpful, detailed, and polite answers to the user's questions."
+     context = ''.join([f" USER: {turn[0].strip()} ASSISTANT: {turn[1].strip()} </s>" for turn in history[:-1]])
+     prompt = instruction + context + f" USER: {history[-1][0].strip()} ASSISTANT:"
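+     # For a two-turn chat the assembled prompt looks like:
+     #   "<instruction> USER: hi ASSISTANT: Hello! </s> USER: <new message> ASSISTANT:"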
+
+     # Move the tokenized prompt to whichever device the model is on, so the
+     # app also runs on CPU-only machines.
+     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
+
+     print()
+     print(prompt)
+     print('##', input_ids.size())
+
+     # Start generation on a separate thread, so that we don't block the UI. The
+     # text is pulled from the streamer in the main thread. The timeout lets the
+     # streamer surface exceptions raised in the generation thread.
+     streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=2048,
+         do_sample=True,
+         temperature=0.7,
+         repetition_penalty=1.1,
+         top_p=0.85
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     # Pull the generated text from the streamer, and update the model output.
+     # Yielding the partial history lets Gradio stream tokens into the chatbot.
+     history[-1][1] = ""
+
+     print("")
+     for new_text in streamer:
+         history[-1][1] += new_text
+         print(new_text, end="", flush=True)
+         yield history
+     print('</s>')
+     return history
+
+
+ def reset_textbox():
+     # Clear the input textbox (currently not wired to any event).
+     return gr.update(value='')
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         "# Baichuan Vicuna Chinese\n"
+         # The Chinese description reads: "a chat model fully fine-tuned on
+         # bilingual Chinese-English ShareGPT data, based on baichuan-7b".
+         f"[{model_id}](https://huggingface.co/{model_id}):使用中英双语sharegpt数据全参数微调的对话模型,基于baichuan-7b"
+     )
+     chatbot = gr.Chatbot().style(height=600)
+     msg = gr.Textbox()
+     clear = gr.ClearButton([msg, chatbot])
+
+     def user(user_message, history):
+         # Append the new user message to the history and disable the textbox
+         # until the bot has finished responding.
+         return gr.update(value="", interactive=False), history + [[user_message, None]]
+
+     # On submit: record the user turn and lock the textbox, then stream the
+     # assistant's reply, then unlock the textbox again.
+     response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+         run_generation, chatbot, chatbot
+     )
+     response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
+
+ demo.queue()
+ demo.launch(server_name='0.0.0.0')
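
The threaded TextIteratorStreamer pattern above also works outside Gradio. A minimal sketch for exercising it in isolation (the small `gpt2` checkpoint here is just a stand-in for quick local testing, not the model this Space serves):

```python
from threading import Thread

from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The quick brown fox", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)

# generate() blocks until finished, so it runs on a worker thread while the
# main thread consumes decoded text chunks from the streamer as they arrive.
Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
for chunk in streamer:
    print(chunk, end="", flush=True)
print()
```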