|
|
|
from Templates import * |
|
from langchain import PromptTemplate |
|
from langchain.chains import LLMChain |
|
from langchain.llms import OpenAI |
|
from dotenv import load_dotenv |
|
import os |
|
import streamlit as st |
|
|
|
|
|
class PromptTemplates:
    """Container for the PromptTemplate objects used by the app's LLM chains."""

    def __init__(self):
        # Prompt that produces a summary of a legal case from its name
        # and the raw case information.
        self.case_summary_prompt = PromptTemplate(
            template=case_summary_template,
            input_variables=["case_name", "case_info"],
        )

        # Prompt that drives the conversational legal-case bot; it is fed
        # the generated summary, retrieved context, and the user's input.
        self.legal_case_bot_prompt = PromptTemplate(
            template=legal_case_bot_template,
            input_variables=["case_summary", "context", "input"],
        )
|
|
|
|
|
|
|
class LLMChains:
    """Builds the LLMChain instances used by the app.

    Reads the OpenAI API key from the environment (via .env) and the model
    name from Streamlit session state, then constructs one chain per prompt.
    """

    def __init__(self):
        # Populate os.environ from a local .env file, if present.
        load_dotenv()
        # BUG FIX: the chains previously read os.environ.get('OPEN_API_KEY')
        # (misspelled, almost certainly unset) while this correctly-loaded
        # key was assigned and never used. Use the loaded key everywhere.
        openai_api_key = os.getenv("OPENAI_API_KEY")

        prompts = PromptTemplates()
        # Model chosen by the user in the Streamlit UI.
        model_name = st.session_state["selected_model"]

        # One shared LLM instance (both chains used identical settings);
        # temperature 0.7 allows moderately creative output.
        llm = OpenAI(
            model_name=model_name,
            api_key=openai_api_key,
            temperature=0.7,
        )

        # Chain that summarizes a legal case.
        # BUG FIX: verbose expects a bool; the string "true" only worked
        # because any non-empty string is truthy.
        self.case_summary_chain = LLMChain(
            llm=llm,
            prompt=prompts.case_summary_prompt,
            verbose=True,
        )

        # Chain that powers the conversational legal-case bot.
        self.legal_case_bot_chain = LLMChain(
            llm=llm,
            prompt=prompts.legal_case_bot_prompt,
            verbose=True,
        )
|
|
|
|
|
|