"""
来自 https://github.com/OpenLMLab/MOSS/blob/main/moss_web_demo_gradio.py


# 难点

## TODO


-[x] 代码和表格的预览
-[x] markdown解析:mdtex2html
-[ ] 可编辑chatbot:https://github.com/gradio-app/gradio/issues/4444
-[ ] 乱码问题


## Reference

- https://github.com/GaiZhenbiao/ChuanhuChatGPT/
"""

import config
from app_util import *
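
# The TODO above mentions Markdown + LaTeX rendering via mdtex2html. Below is a
# minimal, hedged sketch (illustrative only, not wired into the UI; the helper
# name `render_mdtex` is hypothetical) of how a chat message could be converted
# to HTML for preview, assuming the mdtex2html package is installed and its
# `convert` helper accepts a plain string.
def render_mdtex(message: str) -> str:
    """Convert a Markdown/LaTeX message into HTML for chatbot preview (sketch)."""
    import mdtex2html  # local import so the sketch stays optional

    return mdtex2html.convert(message)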

system_list = [
    "You are a helpful assistant.",
    "你是一个导游。",
    "你是一个英语老师。",
    "你是一个程序员。",
    "你是一个心理咨询师。",
]

"""
TODO: 使用说明
"""
with gr.Blocks() as demo:
    # Knowledge Distillation through Self Chatting
    gr.HTML("""<h1 align="center">Distilling the Knowledge through Self Chatting</h1>""")
    system = gr.Dropdown(
        choices=system_list,
        value=system_list[0],
        allow_custom_value=True,
        interactive=True,
        label="System message"
    )
    chatbot = gr.Chatbot(avatar_images=("assets/man.png", "assets/bot.png"))
    with gr.Row():
        with gr.Column(scale=4):
            generated_text = gr.Textbox(show_label=False, placeholder="...", lines=10, visible=False)
            with gr.Row():
                generate_btn = gr.Button("🤔️ Generate")
                retry_btn = gr.Button("🔄  Regenerate")
                undo_btn = gr.Button("↩️ Undo")
                clear_btn = gr.Button("🗑️  Clear")  # 🧹 Clear History
                stop_btn = gr.Button("Stop generating", variant="primary")  # not yet wired to a handler
        with gr.Column(scale=1):
            # generate_query_btn = gr.Button("Generate First Query")
            gr.Dropdown(
                ["moss", "chatglm-2", "chatpdf"],
                value="moss",
                label="model",
                interactive=True,
                # info="Will add more animals later!"
            )

        slider_max_tokens = gr.Slider(minimum=1, maximum=config.MAX_SEQUENCE_LENGTH,
                                      value=config.DEFAULT_MAX_TOKENS, step=1, label="Max tokens")
        slider_temperature = gr.Slider(minimum=0.1, maximum=10.0,
                                       value=config.DEFAULT_TEMPERATURE, step=0.1, label="Temperature",
                                       info="Higher temperature increases randomness")
        slider_top_p = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=config.DEFAULT_TOP_P,
            step=0.05,
            label="Top-p (nucleus sampling)",
        )

    ######## State & event handlers ########
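    # Shared conversation state: a list of {"role", "content"} messages, seeded with the default system prompt.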
    history = gr.State([{"role": "system", "content": system_list[0]}])
    system.change(reset_state, inputs=[system], outputs=[chatbot, history])
    clear_btn.click(reset_state, inputs=[system], outputs=[chatbot, history])

    generate_btn.click(generate, [chatbot, history], outputs=[generated_text, chatbot, history],
                       show_progress="full")
    retry_btn.click(undo_generate, [chatbot, history], outputs=[generated_text, chatbot, history],
                    show_progress="full")
    retry_btn.click(generate, [chatbot, history], outputs=[generated_text, chatbot, history],
                    show_progress="full")
    undo_btn.click(undo_generate, [chatbot, history], outputs=[generated_text, chatbot, history],
                   show_progress="full")

    # Slider changes are forwarded to the setters imported from app_util.
    slider_max_tokens.change(set_max_tokens, inputs=[slider_max_tokens])
    slider_top_p.change(set_top_p, inputs=[slider_top_p])
    slider_temperature.change(set_temperature, inputs=[slider_temperature])

# demo.queue().launch(share=False, server_name="0.0.0.0")
# demo.queue().launch(concurrency_count=1, max_size=5)
demo.queue().launch()