Spaces:
Running
on
Zero
Update app_dialogue.py
Browse files — app_dialogue.py (+3 −3)
app_dialogue.py
CHANGED
|
@@ -19,19 +19,19 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
|
|
| 19 |
|
| 20 |
DEVICE = torch.device("cuda")
|
| 21 |
MODELS = {
|
| 22 |
-
"
|
| 23 |
"HuggingFaceM4/idefics2",
|
| 24 |
trust_remote_code=True,
|
| 25 |
torch_dtype=torch.bfloat16,
|
| 26 |
token=os.environ["HF_AUTH_TOKEN"],
|
| 27 |
revision="25bb7ad6d9ab9e43d5002d30f857d4106ed964f3",
|
| 28 |
).to(DEVICE),
|
| 29 |
-
"
|
| 30 |
"HuggingFaceM4/idefics2",
|
| 31 |
trust_remote_code=True,
|
| 32 |
torch_dtype=torch.bfloat16,
|
| 33 |
token=os.environ["HF_AUTH_TOKEN"],
|
| 34 |
-
revision="
|
| 35 |
).to(DEVICE),
|
| 36 |
# "285 - continued pretraining on text sft - opt 2'000": AutoModelForCausalLM.from_pretrained(
|
| 37 |
# "HuggingFaceM4/idefics2",
|
|
|
|
| 19 |
|
| 20 |
DEVICE = torch.device("cuda")
|
| 21 |
MODELS = {
|
| 22 |
+
"288_ter - mix8 - opt 5'800": AutoModelForCausalLM.from_pretrained(
|
| 23 |
"HuggingFaceM4/idefics2",
|
| 24 |
trust_remote_code=True,
|
| 25 |
torch_dtype=torch.bfloat16,
|
| 26 |
token=os.environ["HF_AUTH_TOKEN"],
|
| 27 |
revision="25bb7ad6d9ab9e43d5002d30f857d4106ed964f3",
|
| 28 |
).to(DEVICE),
|
| 29 |
+
"288_ter - mix 8 - opt 11'000": AutoModelForCausalLM.from_pretrained(
|
| 30 |
"HuggingFaceM4/idefics2",
|
| 31 |
trust_remote_code=True,
|
| 32 |
torch_dtype=torch.bfloat16,
|
| 33 |
token=os.environ["HF_AUTH_TOKEN"],
|
| 34 |
+
revision="7eccbf5178f85eee8fab9995f31ab12441ce767a",
|
| 35 |
).to(DEVICE),
|
| 36 |
# "285 - continued pretraining on text sft - opt 2'000": AutoModelForCausalLM.from_pretrained(
|
| 37 |
# "HuggingFaceM4/idefics2",
|