nehcgs committed
Commit 47c1964 · verified · Parent: e3b2d3e

Update README.md

Files changed (1): README.md (+0 -6)
README.md CHANGED
@@ -50,7 +50,6 @@ import json
50
  from typing import Any, Dict, List
51
  from transformers import AutoModelForCausalLM, AutoTokenizer
52
 
53
- # Specify the desired model name here
54
  model_name = "katanemo/Arch-Agent-3B"
55
 
56
  model = AutoModelForCausalLM.from_pretrained(
@@ -58,8 +57,6 @@ model = AutoModelForCausalLM.from_pretrained(
58
  )
59
  tokenizer = AutoTokenizer.from_pretrained(model_name)
60
 
61
-
62
- # Please use the recommended prompt for each model.
63
  TASK_PROMPT = (
64
  "You are a helpful assistant designed to assist with the user query by making one or more function calls if needed."
65
  "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
@@ -95,7 +92,6 @@ tools = [
95
  }
96
  ]
97
 
98
-
99
  # Helper function to create the system prompt for our model
100
  def format_prompt(tools: List[Dict[str, Any]]):
101
  tool_text = "\n".join(
@@ -103,7 +99,6 @@ def format_prompt(tools: List[Dict[str, Any]]):
103
  )
104
  return TASK_PROMPT.format(tool_text=tool_text)
105
 
106
-
107
  system_prompt = format_prompt(tools)
108
 
109
  messages = [
@@ -111,7 +106,6 @@ messages = [
111
  {"role": "user", "content": "What is the weather in Seattle?"},
112
  ]
113
 
114
- #### 2.2.3 Run inference
115
  model_inputs = tokenizer.apply_chat_template(
116
  messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
117
  ).to(model.device)
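The tools list itself is truncated in this view; only its closing braces appear as hunk context. For orientation, a hypothetical entry in the common JSON-schema function format that format_prompt serializes into the tool_text block could look like the sketch below. The get_weather name and its parameters are illustrative stand-ins, not the README's actual definition.

# Hypothetical tool entry; the README's real `tools` list is elided from
# this diff. The shape follows the common JSON-schema function format.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",  # illustrative name, not from the diff
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name, e.g. Seattle",
                    }
                },
                "required": ["location"],
            },
        },
    }
]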
 
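The snippet ends at the tokenized inputs, right where the removed "#### 2.2.3 Run inference" heading used to sit. A minimal sketch of how inference typically continues from here with the standard transformers API follows; max_new_tokens and the decoding options are illustrative assumptions, not values from the README.

# A minimal sketch, assuming `model`, `tokenizer`, and `model_inputs`
# from the snippet above; max_new_tokens=512 is an illustrative choice.
generated_ids = model.generate(**model_inputs, max_new_tokens=512)

# Strip the prompt tokens so only the newly generated text is decoded.
new_tokens = generated_ids[:, model_inputs["input_ids"].shape[1]:]
response = tokenizer.decode(new_tokens[0], skip_special_tokens=True)
print(response)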