Omnibus committed
Commit
e714960
1 Parent(s): 7e2719d

Update app.py

Files changed (1)
  1. app.py +2 -47
app.py CHANGED
@@ -71,7 +71,7 @@ def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,
 
     for response in stream:
         output += response.token.text
-        yield "",[(prompt,output)]
+        yield "",[history,(prompt,output)]
 
     lines = output.strip().strip("\n").split("\n")
     #history=""
@@ -86,52 +86,7 @@ def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,
             print(line)
         if line.startswith("5. "):
             print(line)
-            return "",[(prompt,output)]
-
-
-    additional_inputs=[
-        gr.Textbox(
-            label="System Prompt",
-            max_lines=1,
-            interactive=True,
-        ),
-        gr.Slider(
-            label="Temperature",
-            value=0.9,
-            minimum=0.0,
-            maximum=1.0,
-            step=0.05,
-            interactive=True,
-            info="Higher values produce more diverse outputs",
-        ),
-        gr.Slider(
-            label="Max new tokens",
-            value=1048,
-            minimum=0,
-            maximum=1048*10,
-            step=64,
-            interactive=True,
-            info="The maximum numbers of new tokens",
-        ),
-        gr.Slider(
-            label="Top-p (nucleus sampling)",
-            value=0.90,
-            minimum=0.0,
-            maximum=1,
-            step=0.05,
-            interactive=True,
-            info="Higher values sample more low-probability tokens",
-        ),
-        gr.Slider(
-            label="Repetition penalty",
-            value=1.2,
-            minimum=1.0,
-            maximum=2.0,
-            step=0.05,
-            interactive=True,
-            info="Penalize repeated tokens",
-        )
-    ]
+            return "",[history,(prompt,output)]
 
 
 def clear_fn():
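
For context, here is a minimal sketch of how a streaming generate() like this one is typically wired to a gr.Chatbot: each yield emits a value for the prompt Textbox (cleared) and the list of (user, bot) pairs to render. The component names and the stand-in token stream below are illustrative assumptions, not taken from this repo, and the sketch uses the conventional history + [(prompt, output)] append, whereas this commit instead nests the prior history as the first element of the returned list.

import gradio as gr

# Illustrative sketch only -- the token loop stands in for the real
# inference stream used by app.py.
def generate(prompt, history):
    history = history or []  # gr.Chatbot state: list of (user, bot) pairs
    output = ""
    for token in ["Drafting", " a", " reply"]:  # stand-in stream
        output += token
        # First output clears the prompt box; second refreshes the chat.
        yield "", history + [(prompt, output)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    prompt_box = gr.Textbox(label="Prompt")
    prompt_box.submit(generate, [prompt_box, chatbot], [prompt_box, chatbot])

if __name__ == "__main__":
    demo.launch()

The deleted additional_inputs list matches the pattern usually passed to gr.ChatInterface(fn, additional_inputs=[...]); since generate() here takes its sampling settings as keyword defaults instead, the block was presumably dead code.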