File size: 2,184 Bytes
925601e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
# ******* THIS FILE CONTAINS ALL THE PROMPTS & CHAINS USED IN Functions.py ***********
from Templates import *
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st


class PromptTemplates:
    """Bundles every PromptTemplate used by the chains in this module.

    Each attribute pairs a template string (imported from Templates) with
    the input variables that template expects to be filled in at run time.
    """

    def __init__(self):
        # attribute name -> (template string, expected input variables)
        template_specs = {
            "legal_adviser_bot_prompt": (
                legal_adviser_template,
                ["chat_history", "input"],
            ),
            "case_summary_prompt": (
                case_summary_template,
                ["case_name", "case_info"],
            ),
            "legal_case_bot_prompt": (
                legal_case_bot_template,
                ["case_summary", "context", "input"],
            ),
            "lawyer_recommendations_prompt": (
                lawyer_recommendation_template,
                ["user_inputs", "matching_lawyers", "additional_info"],
            ),
        }
        for attr_name, (template, variables) in template_specs.items():
            setattr(
                self,
                attr_name,
                PromptTemplate(input_variables=variables, template=template),
            )


class LLMChains:
    """Builds the LLMChain instances consumed by Functions.py.

    Requires OPENAI_API_KEY to be available via a .env file (loaded with
    load_dotenv; the OpenAI client reads it from the environment) and
    st.session_state["selected_model"] to be set by the Streamlit UI
    before this class is instantiated.
    """

    def __init__(self):
        # Populate the environment so the OpenAI client can find the API key.
        # (The previous explicit os.getenv("OPENAI_API_KEY") result was never
        # used, so it has been removed.)
        load_dotenv()
        obj = PromptTemplates()
        model_name = st.session_state["selected_model"]

        # Chat chain for the legal adviser bot; uses the long-context model
        # so extended conversation histories fit in the prompt.
        self.legal_adviser_bot_chain = LLMChain(
            llm=OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7),
            prompt=obj.legal_adviser_bot_prompt,
            verbose=True,  # was verbose="true": truthy by accident, not the documented bool
        )

        # Chain that generates a summary from a case name and case details.
        self.case_summary_chain = LLMChain(
            llm=OpenAI(model_name=model_name, temperature=0.7),
            prompt=obj.case_summary_prompt,
            verbose=True,
        )

        # Conversational chain grounded in a case summary plus retrieved context.
        self.legal_case_bot_chain = LLMChain(
            llm=OpenAI(model_name=model_name, temperature=0.7),
            prompt=obj.legal_case_bot_prompt,
            verbose=True,
        )

        # Chain that produces lawyer recommendations; long-context model to
        # accommodate the matched-lawyer listings in the prompt.
        self.lawyer_recommendations_chain = LLMChain(
            llm=OpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.7),
            prompt=obj.lawyer_recommendations_prompt,
            verbose=True,
        )