"""Gradio chat demo for the Google Gemma models, served through the
Hugging Face InferenceClient, with an optional chat-screenshot export."""

import random

import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient

# Remote Space that renders a chat transcript to an image (used by get_screenshot).
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")

# Available Gemma checkpoints; the "-it" variants are instruction-tuned and
# expect the <start_of_turn> chat template used below.
models=[
    "google/gemma-7b",
    "google/gemma-7b-it",
    "google/gemma-2b",
    "google/gemma-2b-it",
]

# Holds the single active InferenceClient. Pre-seed it with the default model
# so the first chat works before any dropdown change event fires.
client_z=[InferenceClient(models[0])]


def load_models(inp):
    """Swap the active client for the model picked in the dropdown.

    `inp` is an integer index because the dropdown is built with type='index'.
    """
    client_z.clear()
    client_z.append(InferenceClient(models[inp]))
    # Relabel the chatbot so the UI shows which model is active.
    return gr.update(label=models[inp])

VERBOSE=False  # set True to print context-size diagnostics after each generation

def format_prompt(message, history, cust_p):
    """Flatten prior exchanges and append the new message via the user template.

    Earlier turns are concatenated as plain text; only the new message is
    wrapped in the template from `cust_p` (USER_INPUT is the placeholder).
    """
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"{user_prompt}\n"
            prompt += f"{bot_response}\n"
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt
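# Example: with the default template, no history, and the message
# "How does the brain work?", format_prompt returns:
#   <start_of_turn>user
#   How does the brain work?<end_of_turn>
#   <start_of_turn>model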


def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
    # The active client is whichever model load_models last installed;
    # client_choice itself is only used by the dropdown's change event.
    client = client_z[0]

    if not history:
        history = []
    if not memory:
        memory = []
    chat_mem = int(chat_mem)

    # Keep only the last `chat_mem` exchanges. Note: a slice of [-0:] would
    # keep everything, so guard the zero case explicitly.
    recent = memory[-chat_mem:] if chat_mem > 0 else []

    # Rough character-count estimate of the retained context.
    hist_len = 0
    for ea in recent:
        hist_len += len(str(ea))
    in_len = len(system_prompt + prompt) + hist_len

    if (in_len + tokens) > 8000:
        history.append((prompt, "Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
        yield history, memory
    else:
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=int(tokens),
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=int(seed),
        )
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", recent, cust_p)
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
        for response in stream:
            output += response.token.text
            # Stream the partial answer while keeping earlier turns visible.
            yield history + [(prompt, output)], memory
        history.append((prompt, output))
        memory.append((prompt, output))
        yield history, memory
    if VERBOSE:
        print("\n######### HIST " + str(in_len))
        print("\n######### TOKENS " + str(tokens))

def get_screenshot(chat: list, height=5000, width=600, chatblock=[], theme="light", wait=3000, header=True):
    """Render the chat transcript to an image via the remote screenshot Space.

    The remote /run_script endpoint returns several file paths; index 3
    appears to correspond to the chat-block crop, index 0 to the full page.
    """
    tog = 3 if chatblock else 0
    result = ss_client.predict(str(chat), height, width, chatblock, header, theme, wait, api_name="/run_script")
    return f'https://omnibus-html-image-current-tab.hf.space/file={result[tog]}'
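# The returned URL is fed straight into the gr.Image(type='filepath') output
# below; Gradio fetches the remote file when displaying it.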

def clear_fn():
    # Reset the prompt box, system prompt, chatbot, and memory state.
    return None, None, None, None


rand_val = random.randint(1, 1111111111111111)

def check_rand(inp, val):
    # Re-roll the seed when "Random Seed" is checked; otherwise keep the slider value.
    if inp:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
    else:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
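# check_rand runs ahead of chat_inf via .then() on both the submit and Chat
# events below, so a fresh seed is drawn for every generation while
# "Random Seed" is checked.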
    
with gr.Blocks() as app:
    memory=gr.State()
    gr.HTML("""<center><h1 style='font-size:xx-large;'>Google Gemma Models</h1><br><h3>running on Huggingface Inference Client</h3><br><small>EXPERIMENTAL</small></center>""")
    chat_b = gr.Chatbot(height=500)
    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                with gr.Row():
                    with gr.Column(scale=2):
                        btn = gr.Button("Chat")
                    with gr.Column(scale=1):
                        with gr.Group():
                            stop_btn=gr.Button("Stop")
                            clear_btn=gr.Button("Clear")                
                client_choice=gr.Dropdown(label="Models",type='index',choices=models,value=models[0],interactive=True)
                custom_prompt=gr.Textbox(label="Prompt Template",lines=5,value="<start_of_turn>user\nUSER_INPUT<end_of_turn>\n<start_of_turn>model\n",info="USER_INPUT is replaced with the current message")
            with gr.Column(scale=1):
                with gr.Group():
                    rand = gr.Checkbox(label="Random Seed", value=True)
                    seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                    tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="Maximum number of new tokens to generate")
                    temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
                    top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
                    rep_p=gr.Slider(label="Repetition Penalty",step=0.1, minimum=0.1, maximum=2.0, value=1.0)
                    chat_mem=gr.Number(label="Chat Memory", info="Number of previous exchanges to retain",value=4,precision=0)
        with gr.Accordion(label="Screenshot",open=False):
            with gr.Row():
                with gr.Column(scale=3):
                    im_btn=gr.Button("Screenshot")
                    img=gr.Image(type='filepath')
                with gr.Column(scale=1):
                    with gr.Row():
                        im_height=gr.Number(label="Height",value=5000)
                        im_width=gr.Number(label="Width",value=500)
                    wait_time=gr.Number(label="Wait Time",value=3000)
                    theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
                    chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=list(range(1,40)),multiselect=True)

    
    client_choice.change(load_models,client_choice,[chat_b])

    im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
    
    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
    
    stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
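# Queueing enables the streamed generator output and the Stop button's
# `cancels=` hook; default_concurrency_limit caps simultaneous requests.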
app.queue(default_concurrency_limit=10).launch()