# (Removed scrape residue: "Spaces: Sleeping" HuggingFace Spaces status banner — not part of the source.)
# ******* THIS FILE CONTAINS ALL THE PROMPTS & CHAINS USED IN Functions.py ***********
from Templates import *
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
class PromptTemplates:
    """Container for every ``PromptTemplate`` used by the LLM chains.

    Each attribute is a ready-to-use langchain ``PromptTemplate`` whose
    template text is imported from ``Templates.py``.
    """

    def __init__(self):
        # Convert generated user stories into the requested JSON structure.
        self.json_prompt_template = PromptTemplate(
            template=convert_json_template,
            input_variables=["user_stories", "json_structure"],
        )
        # Derive user stories from a project name plus RFP details.
        self.user_story_prompt = PromptTemplate(
            template=user_story_template,
            input_variables=["project_name", "rfp_details"],
        )
        # Conversational bot prompt: prior context plus the latest input.
        self.bot_prompt = PromptTemplate(
            template=bot_template,
            input_variables=["context", "input"],
        )
        # Summarize the RFP for a given project.
        self.summary_prompt = PromptTemplate(
            template=summary_template,
            input_variables=["project_name", "rfp_details"],
        )
        # Effort estimation from the summary, stories and team composition.
        self.estimations_prompt_template = PromptTemplate(
            template=estimations_template,
            input_variables=[
                "project_summary",
                "user_stories",
                "tech_leads",
                "senior_developers",
                "junior_developers",
            ],
        )
        # High-level roadmap built from the combined project input.
        self.roadmap_prompt = PromptTemplate(
            template=roadmap_template,
            input_variables=["project_input"],
        )
class LLMChains:
    """Build one ``LLMChain`` per task, all backed by the same OpenAI model.

    ``load_dotenv()`` is called first so that ``OPENAI_API_KEY`` from a local
    ``.env`` file is placed in the environment, where the ``OpenAI`` client
    picks it up — no need to read it into an unused local variable.
    """

    def __init__(self):
        load_dotenv()  # expose OPENAI_API_KEY from .env to the OpenAI client
        prompts = PromptTemplates()

        # One shared LLM instance instead of six identical copies.
        llm = OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7)

        def _chain(prompt):
            # `verbose` expects a bool; the previous code passed the string
            # "true" on most chains (inconsistently with the roadmap chain).
            return LLMChain(llm=llm, prompt=prompt, verbose=True)

        # generate project summary
        self.summary_chain = _chain(prompts.summary_prompt)
        # generate bot conversation
        self.bot_chain = _chain(prompts.bot_prompt)
        # generate user stories
        self.user_story_chain = _chain(prompts.user_story_prompt)
        # convert user stories to JSON
        self.json_chain = _chain(prompts.json_prompt_template)
        # generate effort estimations
        self.estimations_chain = _chain(prompts.estimations_prompt_template)
        # generate project roadmap
        self.roadmap_chain = _chain(prompts.roadmap_prompt)