Kian Kyars committed on
Commit b0866e4 · 1 Parent(s): 859d9d2

Add all popular open-source models to ALL_MODELS for Spaces

Files changed (1): app.py (+15 -13)
app.py CHANGED
@@ -10,41 +10,44 @@ image = modal.Image.debian_slim().pip_install(
     "gradio"
 )
 
-app = modal.App("agentic-demo")
+app = modal.App("agentic-demo", image=image)
 
 ALL_MODELS = [
-    "meta-llama/Llama-2-70b-hf",
-    "mistralai/Mixtral-8x7B-v0.1",
     "Qwen/Qwen-72B",
-    "mistralai/Mistral-7B-v0.1"
+    "deepseek-ai/deepseek-llm-67b-base",
+    "openchat/openchat-3.5-1210",
+    "microsoft/phi-2",
+    "google/gemma-7b",
+    "01-ai/Yi-34B",
+    "upstage/SOLAR-10.7B-v1.0",
+    "microsoft/Orca-2-13b",
+    "lmsys/vicuna-13b-v1.5"
 ]
 
 def debate_agent(topic, agent_a_model, agent_b_model, judge_model):
     if len({agent_a_model, agent_b_model, judge_model}) < 3:
         return {"error": "Please select three different models."}
     # Agent A
-    tokenizer_a = AutoTokenizer.from_pretrained(agent_a_model, token=os.environ["HUGGINGFACE_API_KEY"])
+    tokenizer_a = AutoTokenizer.from_pretrained(agent_a_model)
     model_a = AutoModelForCausalLM.from_pretrained(
         agent_a_model,
-        token=os.environ["HUGGINGFACE_API_KEY"],
         load_in_4bit=True,
         device_map="auto"
     )
     prompt_a = f"Debate as Agent A: {topic}"
     inputs_a = tokenizer_a(prompt_a, return_tensors="pt").to(model_a.device)
-    outputs_a = model_a.generate(**inputs_a, max_new_tokens=200)
+    outputs_a = model_a.generate(**inputs_a, max_new_tokens=10000)
     arg_a = tokenizer_a.decode(outputs_a[0], skip_special_tokens=True)
     # Agent B
-    tokenizer_b = AutoTokenizer.from_pretrained(agent_b_model, token=os.environ["HUGGINGFACE_API_KEY"])
+    tokenizer_b = AutoTokenizer.from_pretrained(agent_b_model)
     model_b = AutoModelForCausalLM.from_pretrained(
         agent_b_model,
-        token=os.environ["HUGGINGFACE_API_KEY"],
         load_in_4bit=True,
         device_map="auto"
     )
     prompt_b = f"Debate as Agent B: {topic}"
     inputs_b = tokenizer_b(prompt_b, return_tensors="pt").to(model_b.device)
-    outputs_b = model_b.generate(**inputs_b, max_new_tokens=200)
+    outputs_b = model_b.generate(**inputs_b, max_new_tokens=10000)
     arg_b = tokenizer_b.decode(outputs_b[0], skip_special_tokens=True)
     # Judge
     judge_prompt = (
@@ -54,15 +57,14 @@ def debate_agent(topic, agent_a_model, agent_b_model, judge_model):
         f"Agent B says: {arg_b}\n"
         f"Summarize both arguments and pick a winner (A or B) with a short justification."
     )
-    tokenizer_j = AutoTokenizer.from_pretrained(judge_model, token=os.environ["HUGGINGFACE_API_KEY"])
+    tokenizer_j = AutoTokenizer.from_pretrained(judge_model)
     model_j = AutoModelForCausalLM.from_pretrained(
         judge_model,
-        token=os.environ["HUGGINGFACE_API_KEY"],
         load_in_4bit=True,
         device_map="auto"
     )
     inputs_j = tokenizer_j(judge_prompt, return_tensors="pt").to(model_j.device)
-    outputs_j = model_j.generate(**inputs_j, max_new_tokens=200)
+    outputs_j = model_j.generate(**inputs_j, max_new_tokens=10000)
     judge_summary = tokenizer_j.decode(outputs_j[0], skip_special_tokens=True)
     return {
         "Agent A": arg_a,