:zap: [Enhance] Add new model yi-1.5-34b, and remove unavailable models
- apis/chat_api.py +0 -1
- constants/models.py +60 -45
- messagers/message_composer.py +2 -2
apis/chat_api.py
CHANGED
@@ -64,7 +64,6 @@ class ChatAPIApp:
         raise INVALID_API_KEY_ERROR
 
     class ChatCompletionsPostItem(BaseModel):
-
         model: str = Field(
             default="nous-mixtral-8x7b",
             description="(str) `nous-mixtral-8x7b`",
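For reference, a request that selects the new model against this schema might look like the sketch below. The host, port, API key, and the /chat/completions route are assumptions about how this service is deployed; only the model field and its nous-mixtral-8x7b default come from ChatCompletionsPostItem above.

# Illustrative client call (assumed host/port/route, assumed bearer-token auth).
import requests

payload = {
    "model": "yi-1.5-34b",  # omit to fall back to the default "nous-mixtral-8x7b"
    "messages": [{"role": "user", "content": "Hello!"}],
}
resp = requests.post(
    "http://127.0.0.1:23333/chat/completions",  # assumed deployment address
    headers={"Authorization": "Bearer sk-xxx"},  # placeholder key
    json=payload,
)
print(resp.json())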
constants/models.py
CHANGED
@@ -2,11 +2,12 @@ MODEL_MAP = {
     "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # [Recommended]
     "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
     "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
-
+    "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
     "gemma-7b": "google/gemma-1.1-7b-it",
-    "
-    "
-    "
+    # "openchat-3.5": "openchat/openchat-3.5-0106",
+    # "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+    # "llama3-70b": "meta-llama/Meta-Llama-3-70B-Instruct",
+    # "zephyr-141b": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
     "default": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
 }
 
@@ -15,24 +16,31 @@ AVAILABLE_MODELS = list(MODEL_MAP.keys())
 PRO_MODELS = ["command-r-plus", "llama3-70b", "zephyr-141b"]
 
 STOP_SEQUENCES_MAP = {
+    # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1/blob/main/tokenizer_config.json#L33
     "mixtral-8x7b": "</s>",
+    # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO/blob/main/tokenizer_config.json#L50
     "nous-mixtral-8x7b": "<|im_end|>",
+    # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/tokenizer_config.json#L33
     "mistral-7b": "</s>",
-    #
+    # https://huggingface.co/01-ai/Yi-1.5-34B-Chat/blob/main/tokenizer_config.json#L42
+    "yi-1.5-34b": "<|im_end|>",
+    # https://huggingface.co/google/gemma-1.1-7b-it/blob/main/tokenizer_config.json#L1509
     "gemma-7b": "<eos>",
-    "
+    # "openchat-3.5": "<|end_of_turn|>",
+    # "command-r-plus": "<|END_OF_TURN_TOKEN|>",
 }
 
 TOKEN_LIMIT_MAP = {
     "mixtral-8x7b": 32768,
     "nous-mixtral-8x7b": 32768,
     "mistral-7b": 32768,
-
+    "yi-1.5-34b": 4096,
     "gemma-7b": 8192,
-    "
-    "
-    "
-    "
+    # "openchat-3.5": 8192,
+    # "command-r-plus": 32768,
+    # "llama3-70b": 8192,
+    # "zephyr-141b": 2048,
+    # "gpt-3.5-turbo": 8192,
 }
 
 TOKEN_RESERVED = 20
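The three maps above (MODEL_MAP, STOP_SEQUENCES_MAP, TOKEN_LIMIT_MAP) are what the rest of the service keys off of when a request names a model. A minimal sketch of such a lookup, assuming unknown aliases fall back to the "default" entry; the repo's actual helper code may be organized differently.

# Illustrative lookup over the maps shown above (not the repo's actual helpers).
def resolve_model(name: str):
    repo_id = MODEL_MAP.get(name, MODEL_MAP["default"])  # unknown aliases fall back to "default"
    stop = STOP_SEQUENCES_MAP.get(name)                   # e.g. "<|im_end|>" for yi-1.5-34b
    budget = TOKEN_LIMIT_MAP.get(name, 4096) - TOKEN_RESERVED  # keep headroom for the completion
    return repo_id, stop, budget

print(resolve_model("yi-1.5-34b"))
# ('01-ai/Yi-1.5-34B-Chat', '<|im_end|>', 4076)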
@@ -61,46 +69,53 @@ AVAILABLE_MODELS_DICTS = [
         "created": 1700000000,
         "owned_by": "mistralai",
     },
-    # {
-    #     "id": "openchat-3.5",
-    #     "description": "[openchat/openchat-3.5-0106]: https://huggingface.co/openchat/openchat-3.5-0106",
-    #     "object": "model",
-    #     "created": 1700000000,
-    #     "owned_by": "openchat",
-    # },
-    {
-        "id": "gemma-7b",
-        "description": "[google/gemma-1.1-7b-it]: https://huggingface.co/google/gemma-1.1-7b-it",
-        "object": "model",
-        "created": 1700000000,
-        "owned_by": "Google",
-    },
-    {
-        "id": "command-r-plus",
-        "description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co/CohereForAI/c4ai-command-r-plus",
-        "object": "model",
-        "created": 1700000000,
-        "owned_by": "CohereForAI",
-    },
-    {
-        "id": "llama3-70b",
-        "description": "[meta-llama/Meta-Llama-3-70B]: https://huggingface.co/meta-llama/Meta-Llama-3-70B",
-        "object": "model",
-        "created": 1700000000,
-        "owned_by": "Meta",
-    },
     {
-        "id": "
-        "description": "[
+        "id": "yi-1.5-34b",
+        "description": "[01-ai/Yi-1.5-34B-Chat]: https://huggingface.co/01-ai/Yi-1.5-34B-Chat",
         "object": "model",
         "created": 1700000000,
-        "owned_by": "
+        "owned_by": "01-ai",
     },
     {
-        "id": "
-        "description": "[
+        "id": "gemma-7b",
+        "description": "[google/gemma-1.1-7b-it]: https://huggingface.co/google/gemma-1.1-7b-it",
         "object": "model",
         "created": 1700000000,
-        "owned_by": "
+        "owned_by": "Google",
     },
+    # {
+    #     "id": "openchat-3.5",
+    #     "description": "[openchat/openchat-3.5-0106]: https://huggingface.co/openchat/openchat-3.5-0106",
+    #     "object": "model",
+    #     "created": 1700000000,
+    #     "owned_by": "openchat",
+    # },
+    # {
+    #     "id": "command-r-plus",
+    #     "description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co/CohereForAI/c4ai-command-r-plus",
+    #     "object": "model",
+    #     "created": 1700000000,
+    #     "owned_by": "CohereForAI",
+    # },
+    # {
+    #     "id": "llama3-70b",
+    #     "description": "[meta-llama/Meta-Llama-3-70B]: https://huggingface.co/meta-llama/Meta-Llama-3-70B",
+    #     "object": "model",
+    #     "created": 1700000000,
+    #     "owned_by": "Meta",
+    # },
+    # {
+    #     "id": "zephyr-141b",
+    #     "description": "[HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1]: https://huggingface.co/HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    #     "object": "model",
+    #     "created": 1700000000,
+    #     "owned_by": "Huggingface",
+    # },
+    # {
+    #     "id": "gpt-3.5-turbo",
+    #     "description": "[openai/gpt-3.5-turbo]: https://platform.openai.com/docs/models/gpt-3-5-turbo",
+    #     "object": "model",
+    #     "created": 1700000000,
+    #     "owned_by": "OpenAI",
+    # },
 ]
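AVAILABLE_MODELS_DICTS is shaped like the data field of an OpenAI-style model list, so exposing it to clients is a one-liner. A sketch, assuming the app in apis/chat_api.py is a FastAPI application and that a /models route is wanted; neither is confirmed by this diff.

# Hypothetical listing endpoint built on the constant above.
from fastapi import FastAPI

from constants.models import AVAILABLE_MODELS_DICTS

app = FastAPI()

@app.get("/models")
def list_models():
    # Mirrors the OpenAI "list models" response shape.
    return {"object": "list", "data": AVAILABLE_MODELS_DICTS}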
messagers/message_composer.py
CHANGED
@@ -150,8 +150,8 @@ class MessageComposer:
             self.merged_str = "<bos>" + "\n".join(self.merged_str_list)
         # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
         # https://huggingface.co/openchat/openchat-3.5-0106
-        #
-        elif self.model in ["openchat-3.5", "command-r-plus", "gemma-7b"]:
+        # https://huggingface.co/01-ai/Yi-1.5-34B-Chat
+        elif self.model in ["openchat-3.5", "command-r-plus", "gemma-7b", "yi-1.5-34b"]:
             tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
             self.merged_str = tokenizer.apply_chat_template(
                 messages, tokenize=False, add_generation_prompt=True
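For context, the branch that now also covers yi-1.5-34b hands prompt construction to the model's own chat template instead of the hand-written merging above it. A self-contained sketch of that path, assuming the transformers package is installed and the tokenizer files are reachable:

# What the new elif effectively does for yi-1.5-34b, outside the class.
from transformers import AutoTokenizer

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is your name?"},
]
tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-1.5-34B-Chat")
merged_str = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(merged_str)  # generation against this prompt stops at "<|im_end|>" (see STOP_SEQUENCES_MAP)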