Text Generation · Transformers · Safetensors · PyTorch · English · llama · nvidia · llama-3 · conversational · text-generation-inference
suhara committed · Commit 2e4216b · verified · 1 Parent(s): 187502b

Upload 2 files

llama_nemotron_nano_generic_tool_calling.jinja ADDED
@@ -0,0 +1,51 @@
+ {%- if tools %}
+ {{- '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n' -}}
+ {%- if messages[0].role == 'system' -%}
+ {{- messages[0].content + '\n\n' -}}
+ {%- endif -%}
+ {{- '<AVAILABLE_TOOLS>[' -}}
+ {%- for tool in tools -%}
+ {{- (tool.function if tool.function is defined else tool) | tojson -}}{{- ', ' if not loop.last else '' -}}
+ {%- endfor -%}
+ {{- ']</AVAILABLE_TOOLS>' -}}{{- '<|eot_id|>' -}}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n' + messages[0].content + '<|eot_id|>' -}}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages -%}
+ {%- if (message.role == 'user') -%}
+ {{- '<|start_header_id|>user<|end_header_id|>\n\n' + message.content + '<|eot_id|>' -}}
+ {%- elif message.role == 'assistant' -%}
+ {%- set content = message.content if message.content else '' -%}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' + content -}}
+ {%- if message.tool_calls -%}
+ {{- '<TOOLCALL>[' -}}
+ {%- for tool_call in message.tool_calls -%}
+ {%- if tool_call.function -%}
+ {%- set tool_call = tool_call.function -%}
+ {%- endif -%}
+ {{- '{"name": "' }}{{- tool_call.name }}{{- '", "arguments": ' -}}
+ {%- if tool_call.arguments is string -%}
+ {{- tool_call.arguments -}}
+ {%- else -%}
+ {{- tool_call.arguments | tojson -}}
+ {%- endif -%}
+ {{- ', ' if not loop.last else '' -}}
+ {%- endfor -%}
+ {{- ']</TOOLCALL>' -}}
+ {%- endif %}
+ {{- '<|eot_id|>' -}}
+ {%- elif message.role == 'tool' -%}
+ {%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}
+ {{- '<|start_header_id|>user<|end_header_id|>\n\n' }}{{- '<TOOL_RESPONSE>[' -}}
+ {%- endif -%}
+ {{- message.content -}}{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}
+ {%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}
+ {{- ']</TOOL_RESPONSE>' -}}{{- '<|eot_id|>' -}}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+ {%- endif %}
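
For reference, a minimal sketch of rendering this template offline with transformers (the model repo id and the get_weather tool schema below are illustrative assumptions, not part of this commit):

# Minimal sketch: render the chat template above with transformers.
# The repo id and the get_weather tool schema are illustrative assumptions.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("nvidia/Llama-3.1-Nemotron-Nano-8B-v1")  # assumed repo id
with open("llama_nemotron_nano_generic_tool_calling.jinja") as f:
    chat_template = f.read()

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the weather in Paris?"},
]

# The template wraps the tool schemas in <AVAILABLE_TOOLS>[...]</AVAILABLE_TOOLS>
# inside the system turn, then emits standard Llama-3 header tokens per message.
prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,
    chat_template=chat_template,  # pass the template string explicitly
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)

Because the template serializes each assistant tool call as {"name": ..., "arguments": ...} inside <TOOLCALL>[...]</TOOLCALL>, the parser in the second file can recover the calls with a single regex.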
llama_nemotron_nano_toolcall_parser.py ADDED
@@ -0,0 +1,109 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import json
+ import re
+ from collections.abc import Sequence
+ from typing import Union
+
+ import partial_json_parser
+ from partial_json_parser.core.options import Allow
+
+ from vllm.entrypoints.openai.protocol import (
+     ChatCompletionRequest,
+     DeltaFunctionCall, DeltaMessage,
+     DeltaToolCall,
+     ExtractedToolCallInformation,
+     FunctionCall,
+     ToolCall,
+ )
+ from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
+     ToolParser,
+     ToolParserManager,
+ )
+ from vllm.logger import init_logger
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
+ from vllm.utils import random_uuid
+
+ logger = init_logger(__name__)
+
+
+ @ToolParserManager.register_module("llama_nemotron_json")
+ class LlamaNemotronJSONToolParser(ToolParser):
+
+     def __init__(self, tokenizer: AnyTokenizer):
+         super().__init__(tokenizer)
+
+         self.current_tool_name_sent: bool = False
+         self.prev_tool_call_arr: list[dict] = []
+         self.current_tool_id: int = -1
+         self.streamed_args_for_tool: list[str] = []
+
+         self.tool_call_start_token: str = "<TOOLCALL>"
+         self.tool_call_end_token: str = "</TOOLCALL>"
+
+         self.tool_call_regex = re.compile(r"<TOOLCALL>(.*?)</TOOLCALL>", re.DOTALL)
+
+     def extract_tool_calls(
+         self,
+         model_output: str,
+         request: ChatCompletionRequest,
+     ) -> ExtractedToolCallInformation:
+
+         if self.tool_call_start_token not in model_output:
+             return ExtractedToolCallInformation(
+                 tools_called=False,
+                 tool_calls=[],
+                 content=model_output,
+             )
+
+         else:
+
+             try:
+                 str_tool_calls = self.tool_call_regex.findall(model_output)[0].strip()
+                 if not str_tool_calls.startswith("["):
+                     str_tool_calls = "[" + str_tool_calls
+                 if not str_tool_calls.endswith("]"):
+                     str_tool_calls = str_tool_calls + "]"  # append, not prepend, the closing bracket
+                 json_tool_calls = json.loads(str_tool_calls)
+                 tool_calls = []
+                 for tool_call in json_tool_calls:
+                     try:
+                         tool_calls.append(ToolCall(
+                             type="function",
+                             function=FunctionCall(
+                                 name=tool_call["name"],
+                                 arguments=json.dumps(tool_call["arguments"], ensure_ascii=False)
+                                     if isinstance(tool_call["arguments"], dict) else tool_call["arguments"],
+                             ),
+                         ))
+                     except Exception:  # skip malformed entries rather than failing the whole batch
+                         continue
+
+                 content = model_output[:model_output.rfind(self.tool_call_start_token)]
+
+                 return ExtractedToolCallInformation(
+                     tools_called=True,
+                     tool_calls=tool_calls,
+                     content=content if content else None,
+                 )
+
+             except Exception:
+                 logger.exception("Error in extracting tool call from response. Response: %s", model_output)
+                 return ExtractedToolCallInformation(
+                     tools_called=False,
+                     tool_calls=[],
+                     content=model_output,
+                 )
+
+     def extract_tool_calls_streaming(
+         self,
+         previous_text: str,
+         current_text: str,
+         delta_text: str,
+         previous_token_ids: Sequence[int],
+         current_token_ids: Sequence[int],
+         delta_token_ids: Sequence[int],
+         request: ChatCompletionRequest,
+     ) -> Union[DeltaMessage, None]:
+
+         raise NotImplementedError("Tool calling is not supported in streaming mode!")
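
A standalone sketch of the non-streaming extraction above, reimplemented without the vLLM dependencies for illustration (the helper name and sample output are hypothetical; the real parser returns ExtractedToolCallInformation objects rather than plain dicts):

# Standalone sketch mirroring LlamaNemotronJSONToolParser.extract_tool_calls,
# minus the vLLM types. Helper name and sample output are hypothetical.
import json
import re

TOOL_CALL_REGEX = re.compile(r"<TOOLCALL>(.*?)</TOOLCALL>", re.DOTALL)

def extract_tool_calls(model_output: str) -> list[dict]:
    if "<TOOLCALL>" not in model_output:
        return []  # no tool-call block: treat the whole output as plain content
    body = TOOL_CALL_REGEX.findall(model_output)[0].strip()
    # Normalize to a JSON array, tolerating a missing bracket on either side.
    if not body.startswith("["):
        body = "[" + body
    if not body.endswith("]"):
        body = body + "]"
    return json.loads(body)

sample = 'Let me check.<TOOLCALL>[{"name": "get_weather", "arguments": {"city": "Paris"}}]</TOOLCALL>'
print(extract_tool_calls(sample))
# -> [{'name': 'get_weather', 'arguments': {'city': 'Paris'}}]

In a vLLM server, the parser is selected by the name it registers, llama_nemotron_json, through the OpenAI-compatible server's tool-call parser option. Streaming extraction deliberately raises NotImplementedError, so tool calls are parsed only from complete responses.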