Update app.py
Change temperature to 0.3
app.py CHANGED

@@ -3,13 +3,14 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 TOKEN_LIMIT = 2048
-TEMPERATURE = 0.
+TEMPERATURE = 0.3
 REPETITION_PENALTY = 1.05
 MAX_NEW_TOKENS = 500
 MODEL_NAME = "ericzzz/falcon-rw-1b-chat"
 
 # fmt: off
 st.write("**💬Tiny Chat with [Falcon-RW-1B-Chat](https://huggingface.co/ericzzz/falcon-rw-1b-chat)**" )
+st.write("*The model operates on free-tier hardware, which may lead to slower performance during periods of high demand.*")
 
 # fmt: on
 if "chat_history" not in st.session_state:
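For context, here is a minimal sketch of how the constants touched by this commit are conventionally consumed with transformers. The generation call itself sits outside this hunk, so the prompt and variable names below are assumptions, not the app's actual code. Note that `temperature` only takes effect when sampling is enabled (`do_sample=True`); 0.3 biases the model toward more focused, less random replies than higher values.

```python
# Illustrative sketch only: the real generate() call in app.py is not shown
# in this hunk, so the wiring below is an assumption based on common usage.
from transformers import AutoTokenizer, AutoModelForCausalLM

TEMPERATURE = 0.3         # lower temperature -> more focused, less random output
REPETITION_PENALTY = 1.05
MAX_NEW_TOKENS = 500
MODEL_NAME = "ericzzz/falcon-rw-1b-chat"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

inputs = tokenizer("Hello! How are you?", return_tensors="pt")  # hypothetical prompt
output_ids = model.generate(
    **inputs,
    do_sample=True,                        # temperature only applies when sampling
    temperature=TEMPERATURE,
    repetition_penalty=REPETITION_PENALTY,
    max_new_tokens=MAX_NEW_TOKENS,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```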