update nvidia default
app.py CHANGED
@@ -583,6 +583,7 @@ with gr.Blocks(fill_height=True) as demo:
             'nvidia/llama3-chatqa-1.5-8b',
             'nvidia-nemotron-4-340b-instruct',
             # Meta Models
+            'meta/llama-3.1-70b-instruct', # Added Llama 3.1 70B
             'meta/codellama-70b',
             'meta/llama2-70b',
             'meta/llama3-8b',
@@ -620,7 +621,7 @@ with gr.Blocks(fill_height=True) as demo:
             'upstage/solar-10.7b-instruct',
             'snowflake/arctic'
         ],
-        value='
+        value='meta/llama-3.1-70b-instruct', # Changed default to Llama 3.1 70B
         label="Select NVIDIA Model",
         interactive=True
     )
@@ -628,7 +629,7 @@ with gr.Blocks(fill_height=True) as demo:
     nvidia_interface = gr.load(
         name=nvidia_model.value,
         src=nvidia_gradio.registry,
-        accept_token=True,
+        accept_token=True,
         fill_height=True
     )

@@ -636,7 +637,7 @@ with gr.Blocks(fill_height=True) as demo:
     return gr.load(
         name=new_model,
         src=nvidia_gradio.registry,
-        accept_token=True,
+        accept_token=True,
         fill_height=True
     )
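For reference, here is a minimal sketch of the pattern these hunks touch: a dropdown whose value selects which model gr.load() builds a chat UI for via the nvidia_gradio registry. Only the lines shown in the hunks above come from the Space; the abbreviated MODELS list, the imports, and the unwired update_model handler are reconstructions, not the Space's actual app.py.

# Minimal sketch (reconstruction from the hunks above, not the full app.py).
# Assumes the nvidia_gradio package, whose `registry` lets gr.load() build a
# chat interface backed by NVIDIA's API for a given model name.
import gradio as gr
import nvidia_gradio

MODELS = [
    'nvidia/llama3-chatqa-1.5-8b',
    'nvidia-nemotron-4-340b-instruct',
    # Meta Models
    'meta/llama-3.1-70b-instruct',
    'meta/codellama-70b',
    'meta/llama2-70b',
    'meta/llama3-8b',
    # ... remaining models elided ...
    'upstage/solar-10.7b-instruct',
    'snowflake/arctic',
]

with gr.Blocks(fill_height=True) as demo:
    nvidia_model = gr.Dropdown(
        choices=MODELS,
        value='meta/llama-3.1-70b-instruct',  # the new default set by this commit
        label="Select NVIDIA Model",
        interactive=True,
    )

    # Build the chat interface for the currently selected model. Since
    # nvidia_model.value is read once at build time, changing the Dropdown's
    # default is what changes which model loads first.
    nvidia_interface = gr.load(
        name=nvidia_model.value,
        src=nvidia_gradio.registry,
        accept_token=True,
        fill_height=True,
    )

    def update_model(new_model):
        # Mirrors the diff's second gr.load() call; how the app wires this
        # to the dropdown's change event falls outside the hunks shown.
        return gr.load(
            name=new_model,
            src=nvidia_gradio.registry,
            accept_token=True,
            fill_height=True,
        )

demo.launch()

With accept_token=True, the loaded interface asks the visitor to paste their own API key rather than relying on a key stored in the Space, which is why both gr.load() calls in the diff keep that flag.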