Upload 2 files
- agents.py +21 -16
- requirements.txt +2 -1
agents.py
CHANGED
@@ -2,20 +2,25 @@ import os
 import json
 import time
 import traceback
+from dotenv import load_dotenv
 
-from
-from langchain_core.output_parsers import JsonOutputParser
-from pydantic import BaseModel, Field
-
+from langfuse import Langfuse
 from langchain_groq import ChatGroq
-from
+from langfuse.callback import CallbackHandler
+from langchain_core.prompts import PromptTemplate
 
 load_dotenv()
 
+langfuse_handler = CallbackHandler(
+    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
+    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
+    host="https://cloud.langfuse.com",  # 🇪🇺 EU region
+)
+langfuse = Langfuse()
+
 analysis_llm = ChatGroq(
     model="llama-3.1-8b-instant",
     temperature=0.8,
-    max_tokens=800,
     timeout=None,
     max_retries=2,
     api_key=os.getenv("GROQ_ANALYSIS_API_KEY"),
@@ -30,21 +35,21 @@ post_content_llm = ChatGroq(
 
 
 def basic_analysis(news):
-
-    prompt = PromptTemplate.from_file(
-        template_file="prompts/news_selector.yml",
-        input_variables=["news_object"],
-    )
+    prompt = langfuse.get_prompt("news_selector")
 
     for _ in range(5):
         try:
             response = analysis_llm.invoke(
-                prompt.
+                prompt.compile(news_object = news),
+                config={"callbacks": [langfuse_handler]}
             )
             print("################ BASIC ANALYSIS AGENT RESPONSE ################")
             print(response.content)
             print("################ BASIC ANALYSIS END AGENT RESPONSE ################")
 
+            if "</think>" in response.content:
+                response.content = response.content.split("</think>")[1]
+
             start_index = response.content.find("{")
             end_index = response.content.rfind("}")
 
@@ -75,11 +80,11 @@ def get_text_post_content(details, reference):
         template_file="prompts/post_generator_without_source.yml",
         input_variables=["NEWS_CONTENT", "CHAR_LENGTH"],
     )
+    prompt = langfuse.get_prompt("post_generator")
 
-
-
-    )
-    response = post_content_llm.invoke(user_query)
+
+    user_query = prompt.compile(NEWS_CONTENT = details, CHAR_LENGTH = 490- len(reference))
+    response = post_content_llm.invoke(user_query, config={"callbacks": [langfuse_handler]})
 
     print("POST CONTENT RESPONSE:", response)
 
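The agents.py changes replace the local YAML prompt files with Langfuse-managed prompts and route the Groq calls through a Langfuse callback handler for tracing. A minimal standalone sketch of that pattern, assuming the Langfuse v2 Python SDK pinned in requirements.txt, the same environment variable names used above, and a "news_selector" prompt already registered in the Langfuse project:

import os
from dotenv import load_dotenv
from langfuse import Langfuse
from langfuse.callback import CallbackHandler
from langchain_groq import ChatGroq

load_dotenv()

# Tracing: any invocation that passes this handler via `config` is recorded
# as a trace in the Langfuse project identified by the keys below.
langfuse_handler = CallbackHandler(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    host="https://cloud.langfuse.com",
)

# Prompt management: prompts are fetched by name from Langfuse instead of
# being loaded from prompts/*.yml on disk.
langfuse = Langfuse()

llm = ChatGroq(
    model="llama-3.1-8b-instant",
    temperature=0.8,
    max_retries=2,
    api_key=os.getenv("GROQ_ANALYSIS_API_KEY"),
)

prompt = langfuse.get_prompt("news_selector")            # fetched from Langfuse
compiled = prompt.compile(news_object={"title": "..."})  # placeholder news item, not from the diff

response = llm.invoke(compiled, config={"callbacks": [langfuse_handler]})
print(response.content)
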
requirements.txt
CHANGED
@@ -13,4 +13,5 @@ fastapi===0.115.8
 gunicorn===23.0.0
 uvicorn===0.34.0
 pydantic==2.9.2
-certifi
+certifi==2025.1.31
+langfuse==2.60.3
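
A note on the new pins: langfuse==2.60.3 is the 2.x SDK, which matches the langfuse.callback.CallbackHandler import path used in agents.py, and get_prompt("news_selector") / get_prompt("post_generator") only succeed if prompts with those names exist in the Langfuse project. A rough sketch of registering one via the v2 SDK; the prompt text and label here are illustrative, not taken from the diff:

from langfuse import Langfuse

langfuse = Langfuse()  # reads LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY from the environment

# Hypothetical prompt body -- the real prompt previously lived in
# prompts/news_selector.yml and is not shown in the diff.
langfuse.create_prompt(
    name="news_selector",
    type="text",
    prompt="Pick the most newsworthy item from this list: {{news_object}}",
    labels=["production"],  # get_prompt() resolves the "production" label by default
)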