5to9 committed
Commit f9e62c2 · 1 Parent(s): 4e199ac

init gradio

Files changed (3)
  1. .gitignore +4 -0
  2. app.py +146 -46
  3. requirements.txt +4 -1
.gitignore ADDED
@@ -0,0 +1,4 @@
+env/
+.streamlit/
+models/
+.DS_Store
app.py CHANGED
@@ -1,62 +1,162 @@
+import os
+import time
+import spaces
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
 import gradio as gr
-from huggingface_hub import InferenceClient
+from threading import Thread
+import logging
 
+logging.basicConfig(level=logging.DEBUG)
+
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
+MODEL = "AGI-0/Artificium-llama3.1-8B-001"
+
+TITLE = """<h2>Link to the model: <a href="https://huggingface.co/AGI-0/Artificium-llama3.1-8B-001" title="Visit the model repository on Hugging Face">AGI-0/Artificium-llama3.1-8B-001</a> please leave a like to the repository if you liked it.</h2>"""
+
+PLACEHOLDER = """
+<center>
+<p>Hi! How can I help you today?</p>
+</center>
 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+
+
+CSS = """
+.duplicate-button {
+    margin: auto !important;
+    color: white !important;
+    background: black !important;
+    border-radius: 100vh !important;
+}
+h3 {
+    text-align: center;
+}
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+device = "cuda" # for GPU usage or "cpu" for CPU usage
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL,
+    torch_dtype=torch.bfloat16,
+    device_map="auto")
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+@spaces.GPU()
+def stream_chat(
+    message: str,
+    history: list,
+    system_prompt: str,
+    temperature: float = 0.8,
+    max_new_tokens: int = 1024,
+    top_p: float = 1.0,
+    top_k: int = 50,
+    penalty: float = 1.2,
 ):
-    messages = [{"role": "system", "content": system_message}]
+    print(f'message: {message}')
+    print(f'history: {history}')
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+    conversation = [
+        {"role": "system", "content": ""}
+    ]
+    for prompt, answer in history:
+        conversation.extend([
+            {"role": "user", "content": prompt},
+            {"role": "assistant", "content": answer},
+        ])
 
-    messages.append({"role": "user", "content": message})
+    conversation.append({"role": "user", "content": message})
 
-    response = ""
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
+
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        max_new_tokens=max_new_tokens,
+        do_sample=False if temperature == 0 else True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        repetition_penalty=penalty,
+        eos_token_id=[128001, 128008, 128009],
+        streamer=streamer,
+    )
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+    with torch.no_grad():
+        thread = Thread(target=model.generate, kwargs=generate_kwargs)
+        thread.start()
 
-        response += token
-        yield response
+        buffer = ""
+        for new_text in streamer:
+            buffer += new_text
+            yield buffer
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+
+chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
+
+with gr.Blocks(css=CSS, theme="soft") as demo:
+    gr.HTML(TITLE)
+    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
+    gr.ChatInterface(
+        fn=stream_chat,
+        chatbot=chatbot,
+        fill_height=True,
+        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+        additional_inputs=[
+            gr.Textbox(
+                value="",
+                label="",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=0,
+                maximum=1,
+                step=0.1,
+                value=0.3,
+                label="Temperature",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=128,
+                maximum=8192,
+                step=1,
+                value=2048,
+                label="Max new tokens",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=0.0,
+                maximum=1.0,
+                step=0.1,
+                value=1.0,
+                label="top_p",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=1,
+                maximum=50,
+                step=1,
+                value=50,
+                label="top_k",
+                render=False,
+            ),
+            gr.Slider(
+                minimum=0.0,
+                maximum=2.0,
+                step=0.1,
+                value=1.1,
+                label="Repetition penalty",
+                render=False,
+            ),
+        ],
+        examples=[
+            ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
+            ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
+            ["Tell me a random fun fact about the Roman Empire."],
+            ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
+        ],
+        cache_examples=False,
+    )
 
 
 if __name__ == "__main__":
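
The added stream_chat streams tokens by running model.generate on a background thread and reading decoded chunks from a TextIteratorStreamer. A minimal standalone sketch of that pattern outside Gradio is shown below; only the model id comes from this commit, while the prompt and generation settings are illustrative.

    from threading import Thread

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    # Same model as the Space; any causal LM with a chat template would work here.
    model_id = "AGI-0/Artificium-llama3.1-8B-001"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

    # Build a chat-formatted prompt (illustrative single-turn conversation).
    conversation = [{"role": "user", "content": "Hello, who are you?"}]
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # skip_prompt=True keeps the echoed prompt out of the streamed output.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so it runs on a worker thread while the main thread consumes tokens.
    thread = Thread(
        target=model.generate,
        kwargs=dict(input_ids=input_ids, max_new_tokens=256, streamer=streamer),
    )
    thread.start()

    for new_text in streamer:  # yields decoded text chunks as they are generated
        print(new_text, end="", flush=True)
    thread.join()

In the Space, the same loop accumulates the chunks into a buffer and yields the growing string, which is the streaming contract gr.ChatInterface expects from its fn.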
requirements.txt CHANGED
@@ -1 +1,4 @@
-huggingface_hub==0.22.2
+huggingface_hub==0.22.2
+torch==2.4.0
+transformers[sentencepiece]
+accelerate
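
When running the Space locally, a quick way to confirm the environment matches these pins is a small check like the following (a hypothetical helper, not part of the commit):

    # Hypothetical helper, not part of the commit: print installed versions of the
    # dependencies listed in requirements.txt.
    import accelerate
    import huggingface_hub
    import torch
    import transformers

    print("huggingface_hub", huggingface_hub.__version__)  # pinned to 0.22.2
    print("torch", torch.__version__)                      # pinned to 2.4.0
    print("transformers", transformers.__version__)        # unpinned
    print("accelerate", accelerate.__version__)            # unpinned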