pooroligarch committed on
Commit
1c2b495
·
1 Parent(s): adda8f6

Switch to quantized model

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -2,7 +2,7 @@ from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
  client = InferenceClient(
5
- "teknium/Mistral-Trismegistus-7B"
6
  )
7
 
8
  def format_prompt(message, history):
 
2
  import gradio as gr
3
 
4
  client = InferenceClient(
5
+ "TheBloke/Mistral-Trismegistus-7B-AWQ"
6
  )
7
 
8
  def format_prompt(message, history):