Spaces:
MikaJLeh committed · Commit f2ec360
Parent(s): ac1a3b1
Pushed all files to Hugging Face, replacing old content
- .DS_Store +0 -0
- Assets/.DS_Store +0 -0
- Assets/StratXcel_white_small.jpg +0 -0
- Assets/black_waves2.jpeg +0 -0
- Assets/credentials.json +6 -0
- Corporate_Documents/.gitkeep +0 -0
- Financial_Documents/.gitkeep +0 -0
- Intercreditor_Documents/.gitkeep +0 -0
- LPA/.gitkeep +0 -0
- StratXcel.png +0 -0
- app.py +1552 -0
- chains_v2/.gitkeep +0 -0
- chains_v2/__init__.py +0 -0
- chains_v2/__pycache__/__init__.cpython-311.pyc +0 -0
- chains_v2/__pycache__/create_questions.cpython-311.pyc +0 -0
- chains_v2/__pycache__/most_pertinent_question.cpython-311.pyc +0 -0
- chains_v2/__pycache__/question_atomizer.cpython-311.pyc +0 -0
- chains_v2/__pycache__/refine_answer.cpython-311.pyc +0 -0
- chains_v2/__pycache__/research_compiler.cpython-311.pyc +0 -0
- chains_v2/__pycache__/retrieval_qa.cpython-311.pyc +0 -0
- chains_v2/create_questions.py +40 -0
- chains_v2/most_pertinent_question.py +29 -0
- chains_v2/question_atomizer.py +34 -0
- chains_v2/refine_answer.py +30 -0
- chains_v2/research_compiler.py +41 -0
- chains_v2/retrieval_qa.py +41 -0
- chroma_db_ESG/.gitkeep +0 -0
- chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/header.bin +0 -0
- chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/index_metadata.pickle +0 -0
- chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/length.bin +0 -0
- chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/link_lists.bin +0 -0
- chroma_db_LPA/.gitkeep +0 -0
- chroma_db_LPA/chroma.sqlite3 +0 -0
- chroma_db_company/.gitkeep +0 -0
- chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/header.bin +0 -0
- chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/length.bin +0 -0
- chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/link_lists.bin +0 -0
- chroma_db_financial/.gitkeep +0 -0
- chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/header.bin +0 -0
- chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/index_metadata.pickle +0 -0
- chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/length.bin +0 -0
- chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/link_lists.bin +0 -0
- chroma_db_intercreditor/.gitkeep +0 -0
- chroma_db_intercreditor/chroma.sqlite3 +0 -0
- data/.gitkeep +0 -0
- helpers/.gitkeep +0 -0
- helpers/__pycache__/questions_helper.cpython-311.pyc +0 -0
- helpers/__pycache__/response_helpers.cpython-311.pyc +0 -0
- helpers/questions_helper.py +23 -0
- helpers/response_helpers.py +45 -0
.DS_Store
ADDED
Binary file (6.15 kB)

Assets/.DS_Store
ADDED
Binary file (6.15 kB)

Assets/StratXcel_white_small.jpg
ADDED

Assets/black_waves2.jpeg
ADDED
Assets/credentials.json
ADDED
@@ -0,0 +1,6 @@
{
    "Mika": "password1",
    "Maija": "MikaPassword",
    "Sara": "password2",
    "Teemu": "LT_334+DInv"
}
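Note that app.py (added below) consumes this file directly: load_credentials() reads it with json.load and login() compares the submitted password against these plaintext values. A minimal sketch of that check, mirroring the diff below:

import json

with open("Assets/credentials.json") as f:
    credentials = json.load(f)

def check(username, password):
    # Plaintext comparison, as in app.py's login()
    return username in credentials and credentials[username] == password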
Corporate_Documents/.gitkeep
ADDED
File without changes

Financial_Documents/.gitkeep
ADDED
File without changes

Intercreditor_Documents/.gitkeep
ADDED
File without changes

LPA/.gitkeep
ADDED
File without changes

StratXcel.png
ADDED
app.py
ADDED
@@ -0,0 +1,1552 @@
import os
import re
import shutil
import json
import uuid
import joblib
import nltk
import streamlit as st
import chromadb
from fpdf import FPDF
from chromadb import Client
from chromadb.config import Settings
from PIL import Image
from dotenv import load_dotenv, find_dotenv
from streamlit_chat import message
from yachalk import chalk

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_parse import LlamaParse

from langchain_groq import ChatGroq
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.chains import LLMChain, RetrievalQA
from langchain.agents import AgentType, Tool, initialize_agent, AgentExecutor
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import PGVector
from langchain.document_loaders import PyPDFLoader, UnstructuredPDFLoader, PyPDFium2Loader
from langchain_community.utilities import SerpAPIWrapper
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import (
    UnstructuredMarkdownLoader,
    DirectoryLoader,
    PyMuPDFLoader,
    UnstructuredXMLLoader,
    CSVLoader,
    PyPDFDirectoryLoader,
)

## Import all the chains.
from chains_v2.create_questions import QuestionCreationChain
from chains_v2.most_pertinent_question import MostPertinentQuestion
from chains_v2.retrieval_qa import retrieval_qa
from chains_v2.research_compiler import research_compiler
from chains_v2.question_atomizer import QuestionAtomizer
from chains_v2.refine_answer import RefineAnswer
## Import all the helpers.
from helpers.response_helpers import result2QuestionsList
from helpers.response_helpers import qStr2Dict
from helpers.questions_helper import getAnsweredQuestions
from helpers.questions_helper import getUnansweredQuestions
from helpers.questions_helper import getSubQuestions
from helpers.questions_helper import getHopQuestions
from helpers.questions_helper import getLastQuestionId
from helpers.questions_helper import markAnswered
from helpers.questions_helper import getQuestionById

import nest_asyncio  # noqa: E402
nest_asyncio.apply()

load_dotenv(find_dotenv())

nltk.download('averaged_perceptron_tagger_eng')

os.environ["TOKENIZERS_PARALLELISM"] = "false"
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
GOOGLE_CSE_ID = os.environ["GOOGLE_CSE_ID"]
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
LLAMA_PARSE_API_KEY = os.environ["LLAMA_PARSE_API_KEY"]
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
LANGCHAIN_API_KEY = os.environ["LANGCHAIN_API_KEY"]
LANGCHAIN_ENDPOINT = os.environ["LANGCHAIN_ENDPOINT"]
LANGCHAIN_PROJECT = os.environ["LANGCHAIN_PROJECT"]
groq_api_key = os.getenv('GROQ_API_KEY')
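For reference, the block above hard-requires eight variables via os.environ[...] (which raises KeyError if unset) and reads GROQ_API_KEY optionally via os.getenv. A minimal .env file for load_dotenv(find_dotenv()) to pick up might look like this (placeholder values; not part of the commit):

SERPAPI_API_KEY=<your-serpapi-key>
GOOGLE_CSE_ID=<your-cse-id>
GOOGLE_API_KEY=<your-google-key>
LLAMA_PARSE_API_KEY=<your-llamaparse-key>
HUGGINGFACEHUB_API_TOKEN=<your-hf-token>
LANGCHAIN_API_KEY=<your-langsmith-key>
LANGCHAIN_ENDPOINT=<your-langsmith-endpoint>
LANGCHAIN_PROJECT=<your-project-name>
GROQ_API_KEY=<your-groq-key>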
#--------------
im = Image.open("Assets/StratXcel_white_small.jpg")

st.set_page_config(page_title="StratXcel",
                   page_icon=im,
                   layout="wide")

st.markdown(
    """
    <style>
    /* Main app background and text color */
    .stApp {
        background-color: black;
        color: #FAFAFA;
        font-family: 'sans serif';
    }
    /* Background color for the sidebar */
    .css-1d391kg {
        background-color: #262730;
    }
    /* Text color for sidebar and other text elements */
    .css-1d391kg, .css-145kmo2 {
        color: #FAFAFA;
    }
    /* Button background color and text color */
    .css-1v0mbdj, .css-1dbjc4n, .css-1ph4q5j, .stButton button {
        background-color: #2C5FCB;
        color: #FAFAFA; /* Text color */
    }
    /* Button hover state */
    .css-1v0mbdj:hover, .css-1dbjc4n:hover, .css-1ph4q5j:hover, .stButton button:hover {
        background-color: #1a4b8e;
    }
    </style>
    """,
    unsafe_allow_html=True
)
#st.sidebar.image('StratXcel.png', width=150)
st.image('Assets/black_waves2.jpeg', width=1240)

def load_credentials(filepath):
    with open(filepath, 'r') as file:
        return json.load(file)

# Load credentials from 'credentials.json'
credentials = load_credentials('Assets/credentials.json')

# Initialize session state if not already done
if 'logged_in' not in st.session_state:
    st.session_state.logged_in = False
    st.session_state.username = ''

# Function to handle login
def login(username, password):
    if username in credentials and credentials[username] == password:
        st.session_state.logged_in = True
        st.session_state.username = username
        st.rerun()  # Rerun to reflect login state
    else:
        st.session_state.logged_in = False
        st.session_state.username = ''
        st.error("Invalid username or password.")

# Function to handle logout
def logout():
    st.session_state.logged_in = False
    st.session_state.username = ''
    st.rerun()  # Rerun to reflect logout state

#--------------
## Define log printers
def print_iteration(current_iteration):
    print(
        chalk.bg_yellow_bright.black.bold(
            f"\n Iteration - {current_iteration} ▷▶ \n"
        )
    )

def print_unanswered_questions(unanswered):
    print(
        chalk.cyan_bright("** Unanswered Questions **"),
        chalk.cyan("".join([f"\n'{q['id']}. {q['question']}'" for q in unanswered])),
    )

def print_next_question(current_question_id, current_question):
    print(
        chalk.magenta.bold("** 🤔 Next Question I must ask: **\n"),
        chalk.magenta(current_question_id),
        chalk.magenta(current_question["question"]),
    )

def print_answer(current_question):
    print(
        chalk.yellow_bright.bold("** Answer **\n"),
        chalk.yellow_bright(current_question["answer"]),
    )

def print_final_answer(answerpad):
    print(
        chalk.white("** Refined Answer **\n"),
        chalk.white(answerpad[-1]),
    )

def print_max_iterations():
    print(
        chalk.bg_yellow_bright.black.bold(
            "\n ✔✔ Max Iterations Reached. Compiling the results ...\n"
        )
    )

def print_result(result):
    print(chalk.italic.white_bright((result["text"])))

def print_sub_question(q):
    print(chalk.magenta.bold(f"** Sub Question **\n{q['question']}\n{q['answer']}\n"))

## ---- The researcher ----- ##

class Agent:
    ## Create chains
    def __init__(self, agent_settings, scratchpad, store, verbose):
        self.store = store
        self.scratchpad = scratchpad
        self.agent_settings = agent_settings
        self.verbose = verbose
        self.question_creation_chain = QuestionCreationChain.from_llm(
            language_model(
                temperature=self.agent_settings["question_creation_temperature"]
            ),
            verbose=self.verbose,
        )
        self.question_atomizer = QuestionAtomizer.from_llm(
            llm=language_model(
                temperature=self.agent_settings["question_atomizer_temperature"]
            ),
            verbose=self.verbose,
        )
        self.most_pertinent_question = MostPertinentQuestion.from_llm(
            language_model(
                temperature=self.agent_settings["question_creation_temperature"]
            ),
            verbose=self.verbose,
        )
        self.refine_answer = RefineAnswer.from_llm(
            language_model(
                temperature=self.agent_settings["refine_answer_temperature"]
            ),
            verbose=self.verbose,
        )

    def run(self, question):
        ## Step 0. Prepare the initial set of questions
        atomized_questions_response = self.question_atomizer.run(
            question=question,
            num_questions=self.agent_settings["num_atomistic_questions"],
        )

        self.scratchpad["questions"] += result2QuestionsList(
            question_response=atomized_questions_response,
            type="subquestion",
            status="unanswered",
        )

        for q in self.scratchpad["questions"]:
            q["answer"], q["documents"] = retrieval_qa(
                llm=language_model(
                    temperature=self.agent_settings["qa_temperature"],
                    verbose=self.verbose,
                ),
                retriever=self.store.as_retriever(
                    search_type="mmr", search_kwargs={"k": 5, "fetch_k": 10}
                ),
                question=q["question"],
                answer_length=self.agent_settings["intermediate_answers_length"],
                verbose=self.verbose,
            )
            q["status"] = "answered"
            print_sub_question(q)

        current_context = "".join(
            f"\n{q['id']}. {q['question']}\n{q['answer']}\n"
            for q in self.scratchpad["questions"]
        )

        self.scratchpad["answerpad"] += [current_context]

        current_iteration = 0

        while True:
            current_iteration += 1
            print_iteration(current_iteration)

            # STEP 1: create questions
            start_id = getLastQuestionId(self.scratchpad["questions"]) + 1
            questions_response = self.question_creation_chain.run(
                question=question,
                context=current_context,
                previous_questions=[
                    "".join(f"\n{q['question']}") for q in self.scratchpad["questions"]
                ],
                num_questions=self.agent_settings["num_questions_per_iteration"],
                start_id=start_id,
            )
            self.scratchpad["questions"] += result2QuestionsList(
                question_response=questions_response,
                type="hop",
                status="unanswered",
            )

            # STEP 2: Choose question for current iteration
            unanswered = getUnansweredQuestions(self.scratchpad["questions"])
            unanswered_questions_prompt = self.unanswered_questions_prompt(unanswered)
            print_unanswered_questions(unanswered)
            response = self.most_pertinent_question.run(
                original_question=question,
                unanswered_questions=unanswered_questions_prompt,
            )
            current_question_dict = qStr2Dict(question=response)
            current_question_id = current_question_dict["id"]
            current_question = getQuestionById(
                self.scratchpad["questions"], current_question_id
            )
            print_next_question(current_question_id, current_question)

            # STEP 3: Answer the question
            current_question["answer"], current_question["documents"] = retrieval_qa(
                llm=language_model(
                    temperature=self.agent_settings["qa_temperature"],
                    verbose=self.verbose,
                ),
                retriever=self.store.as_retriever(
                    search_type="mmr", search_kwargs={"k": 5, "fetch_k": 10}
                ),
                question=current_question["question"],
                answer_length=self.agent_settings["intermediate_answers_length"],
                verbose=self.verbose,
            )
            markAnswered(self.scratchpad["questions"], current_question_id)
            print_answer(current_question)
            current_context = current_question["answer"]

            ## STEP 4: refine the answer
            refinement_context = current_question["question"] + "\n" + current_context
            refine_answer = self.refine_answer.run(
                question=question,
                context=refinement_context,
                answer=self.get_latest_answer(),
            )
            self.scratchpad["answerpad"] += [refine_answer]
            print_final_answer(self.scratchpad["answerpad"])

            if current_iteration > self.agent_settings["max_iterations"]:
                print_max_iterations()
                break

    def unanswered_questions_prompt(self, unanswered):
        return (
            "[" + "".join([f"\n{q['id']}. {q['question']}" for q in unanswered]) + "]"
        )

    def notes_prompt(self, answered_questions):
        return "".join(
            [
                f"{{ Question: {q['question']}, Answer: {q['answer']} }}"
                for q in answered_questions
            ]
        )

    def get_latest_answer(self):
        answers = self.scratchpad["answerpad"]
        answer = answers[-1] if answers else ""
        return answer

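For orientation, a minimal sketch of how this Agent would be wired up. The settings keys and the scratchpad shape are exactly those read by the class above; the concrete values, and the choice of store, are illustrative assumptions, not part of the commit:

agent_settings = {
    "num_atomistic_questions": 2,          # assumed value
    "num_questions_per_iteration": 4,      # assumed value
    "question_atomizer_temperature": 0,
    "question_creation_temperature": 0.4,  # assumed value
    "refine_answer_temperature": 0,
    "qa_temperature": 0,
    "intermediate_answers_length": 200,    # assumed value
    "max_iterations": 3,                   # assumed value
}
scratchpad = {"questions": [], "answerpad": []}
# `store` would be one of the Chroma stores built further down, e.g. doc_retriever_company:
# agent = Agent(agent_settings, scratchpad, store=doc_retriever_company, verbose=False)
# agent.run("What are the company's key obligations to its creditors?")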
#--------------
# If not logged in, show login form
if not st.session_state.logged_in:
    st.sidebar.write("Login")
    username = st.sidebar.text_input('Username')
    password = st.sidebar.text_input('Password', type='password')
    if st.sidebar.button('Login'):
        login(username, password)
    # Stop the script here if the user is not logged in
    st.stop()

# If logged in, show logout button and main content
#st.sidebar.image('StratXcel.png', width=150)
if st.session_state.logged_in:
    st.sidebar.write(f"Welcome, {st.session_state.username}!")
    if st.sidebar.button('Logout'):
        logout()

#st.write(css, unsafe_allow_html=True)

company_document = st.sidebar.toggle("Shareholder agreement", False)
financial_document = st.sidebar.toggle("Debt agreement", False)
intercreditor_document = st.sidebar.toggle("Intercreditor agreement", False)
LPA_document = st.sidebar.toggle("Limited partnership agreement", False)
ESG_document = st.sidebar.toggle("ESG report", False)

#-------------
llm = ChatGroq(groq_api_key=groq_api_key,
               model_name="llama-3.2-90b-vision-preview", temperature=0.0, streaming=True)
Llama = "llama-3.2-90b-vision-preview"
#--------------
def language_model(
    model_name: str = Llama, temperature: float = 0, verbose: bool = False
):
    llm = ChatGroq(groq_api_key=groq_api_key, model_name=model_name, temperature=temperature, verbose=verbose)
    return llm
#--------------
doc_retriever_company = None
doc_retriever_financials = None
doc_retriever_intercreditor = None
doc_retriever_LPA = None
doc_retriever_ESG = None
#--------------

#@st.cache_data
def load_or_parse_data_company():
    data_file = "./data/parsed_data_company.pkl"
    if os.path.exists(data_file):
        # Reuse earlier parse results instead of calling LlamaParse again
        return joblib.load(data_file)

    parsingInstructionUber10k = """The provided documents are company law documents of a company.
    They contain detailed information about the rights and obligations of the company, its shareholders, and management.
    They also contain procedures for dispute resolution, voting, control priority, and exit and sale situations.
    You must never provide false legal or financial information. Use only the information included in the context documents.
    Only refer to other sources if the context document refers to them or if necessary to provide additional understanding to the company's documents."""

    parser = LlamaParse(api_key=LLAMA_PARSE_API_KEY,
                        result_type="markdown",
                        parsing_instruction=parsingInstructionUber10k,
                        max_timeout=5000,
                        gpt4o_mode=True,
                        )

    file_extractor = {".pdf": parser,
                      ".docx": parser,
                      ".doc": parser,
                      }
    reader = SimpleDirectoryReader("./Corporate_Documents", file_extractor=file_extractor)
    documents = reader.load_data()

    print("Saving the parse results in .pkl format ..........")
    joblib.dump(documents, data_file)

    # Set the parsed data to the variable
    parsed_data_company = documents

    return parsed_data_company

#@st.cache_data
def load_or_parse_data_financial():
    data_file = "./data/parsed_data_financial.pkl"
    if os.path.exists(data_file):
        # Reuse earlier parse results instead of calling LlamaParse again
        return joblib.load(data_file)

    parsingInstructionUber10k = """The provided documents are financial law documents of a company.
    They contain detailed information about the rights and obligations of the company and its creditors.
    They also contain procedures for acceleration of debt, sale of security, enforcement, use of creditor control, priority and distribution of assets.
    You must never provide false legal or financial information. Use only the information included in the context documents.
    Only refer to other sources if the context document refers to them or if necessary to provide additional understanding to the company's documents."""

    parser = LlamaParse(api_key=LLAMA_PARSE_API_KEY,
                        result_type="markdown",
                        parsing_instruction=parsingInstructionUber10k,
                        max_timeout=5000,
                        gpt4o_mode=True,
                        )

    file_extractor = {".pdf": parser,
                      ".docx": parser,
                      ".doc": parser,
                      }
    reader = SimpleDirectoryReader("./Financial_Documents", file_extractor=file_extractor)
    documents = reader.load_data()

    print("Saving the parse results in .pkl format ..........")
    joblib.dump(documents, data_file)

    # Set the parsed data to the variable
    parsed_data_financial = documents

    return parsed_data_financial

#--------------
#@st.cache_data
def load_or_parse_data_intercreditor():
    data_file = "./data/parsed_data_intercreditor.pkl"
    if os.path.exists(data_file):
        # Reuse earlier parse results instead of calling LlamaParse again
        return joblib.load(data_file)

    parsingInstructionUber10k = """The provided document is an intercreditor agreement between a company and its creditor groups.
    It contains detailed information about the rights and obligations of the company and its creditors and creditor groups.
    It also contains procedures for acceleration of debt, sale of security, enforcement, use of creditor control, priority and distribution of assets.
    You must never provide false legal or financial information. Use only the information included in the context documents.
    Only refer to other sources if the context document refers to them or if necessary to provide additional understanding to the company's documents."""

    parser = LlamaParse(api_key=LLAMA_PARSE_API_KEY,
                        result_type="markdown",
                        parsing_instruction=parsingInstructionUber10k,
                        max_timeout=5000,
                        gpt4o_mode=True,
                        )

    file_extractor = {".pdf": parser,
                      ".docx": parser,
                      ".doc": parser,
                      }
    reader = SimpleDirectoryReader("./Intercreditor_Documents", file_extractor=file_extractor)
    documents = reader.load_data()

    print("Saving the parse results in .pkl format ..........")
    joblib.dump(documents, data_file)

    # Set the parsed data to the variable
    parsed_data_intercreditor = documents

    return parsed_data_intercreditor

#@st.cache_data
def load_or_parse_data_LPA():
    data_file = "./data/parsed_data_LPA.pkl"
    if os.path.exists(data_file):
        # Reuse earlier parse results instead of calling LlamaParse again
        return joblib.load(data_file)

    parsingInstructionUber10k = """The provided document is a limited partnership agreement between a fund, general partner and limited partners.
    It contains detailed information about the rights and obligations of the fund, general partner and limited partners.
    It also contains procedures for investments, additional investments, general partner and fund costs, liability and other fund matters.
    You must never provide false legal, statistical or financial information. Use only the information included in the context documents.
    Only refer to other sources if the context document refers to them or if necessary to provide additional understanding to the company's documents."""

    parser = LlamaParse(api_key=LLAMA_PARSE_API_KEY,
                        result_type="markdown",
                        parsing_instruction=parsingInstructionUber10k,
                        max_timeout=5000,
                        gpt4o_mode=True,
                        )

    file_extractor = {".pdf": parser,
                      ".docx": parser,
                      ".doc": parser,
                      }
    reader = SimpleDirectoryReader("./LPA", file_extractor=file_extractor)
    documents = reader.load_data()

    print("Saving the parse results in .pkl format ..........")
    joblib.dump(documents, data_file)

    # Set the parsed data to the variable
    parsed_data_LPA = documents

    return parsed_data_LPA

#--------------
#@st.cache_data
def load_or_parse_data_ESG():
    data_file = "./data/parsed_data_ESG.pkl"
    if os.path.exists(data_file):
        # Reuse earlier parse results instead of calling LlamaParse again
        return joblib.load(data_file)

    parsingInstructionUber10k = """The provided document is an ESG and sustainability document of a company.
    It contains detailed information about the environmental, social and governance aspects of the company.
    You must never provide false legal or financial information. Use only the information included in the context documents.
    Only refer to other sources if the context document refers to them or if necessary to provide additional understanding to the company's documents."""

    parser = LlamaParse(api_key=LLAMA_PARSE_API_KEY,
                        result_type="markdown",
                        parsing_instruction=parsingInstructionUber10k,
                        max_timeout=5000,
                        gpt4o_mode=True,
                        )

    file_extractor = {".pdf": parser,
                      ".docx": parser,
                      ".doc": parser,
                      }
    reader = SimpleDirectoryReader("./ESG", file_extractor=file_extractor)
    documents = reader.load_data()

    print("Saving the parse results in .pkl format ..........")
    joblib.dump(documents, data_file)

    # Set the parsed data to the variable
    parsed_data_ESG = documents

    return parsed_data_ESG

#--------------
# Create vector database
@st.cache_resource
def create_vector_database_company():

    llama_parse_documents = load_or_parse_data_company()

    with open('data/output_company.md', 'a') as f:  # Open the file in append mode ('a')
        for doc in llama_parse_documents:
            f.write(doc.text + '\n')

    markdown_path = "data/output_company.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    documents = loader.load()

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=30)
    docs = text_splitter.split_documents(documents)

    print(f"length of documents loaded: {len(documents)}")
    print(f"total number of document chunks generated: {len(docs)}")

    persist_directory = "./chroma_db_company"  # Specify directory for Chroma persistence
    embed_model = HuggingFaceEmbeddings()
    print('Vector DB not yet created !')

    vs = Chroma.from_documents(
        documents=docs,
        embedding=embed_model,
        collection_name="rag_company",
        persist_directory=persist_directory  # Ensure persistence
    )

    doc_retriever_company = vs

    index = VectorStoreIndex.from_documents(llama_parse_documents)
    query_engine = index.as_query_engine()

    print('Vector DB created successfully !')
    return doc_retriever_company, query_engine

@st.cache_resource
def create_vector_database_financial():
    # Call the function to either load or parse the data
    llama_parse_documents = load_or_parse_data_financial()

    with open('data/output_financials.md', 'a') as f:  # Open the file in append mode ('a')
        for doc in llama_parse_documents:
            f.write(doc.text + '\n')

    markdown_path = "data/output_financials.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    documents = loader.load()
    # Split loaded documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=15)
    docs = text_splitter.split_documents(documents)

    print(f"length of documents loaded: {len(documents)}")
    print(f"total number of document chunks generated: {len(docs)}")
    persist_directory = "./chroma_db_financial"  # Specify directory for Chroma persistence

    embed_model = HuggingFaceEmbeddings()

    vs = Chroma.from_documents(
        documents=docs,
        embedding=embed_model,
        collection_name="rag_financial",
        persist_directory=persist_directory  # Ensure persistence
    )
    doc_retriever_financial = vs

    index = VectorStoreIndex.from_documents(llama_parse_documents)
    query_engine = index.as_query_engine()

    print('Vector DB created successfully !')
    return doc_retriever_financial, query_engine

#--------------

@st.cache_resource
def create_vector_database_intercreditor():
    # Call the function to either load or parse the data
    llama_parse_documents = load_or_parse_data_intercreditor()

    with open('data/output_intercreditor.md', 'a') as f:  # Open the file in append mode ('a')
        for doc in llama_parse_documents:
            f.write(doc.text + '\n')

    markdown_path = "data/output_intercreditor.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    documents = loader.load()
    # Split loaded documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=15)
    docs = text_splitter.split_documents(documents)

    print(f"length of documents loaded: {len(documents)}")
    print(f"total number of document chunks generated: {len(docs)}")
    persist_directory = "./chroma_db_intercreditor"  # Specify directory for Chroma persistence
    embed_model = HuggingFaceEmbeddings()

    vs = Chroma.from_documents(
        documents=docs,
        embedding=embed_model,
        collection_name="rag_intercreditor",
        persist_directory=persist_directory  # Ensure persistence
    )
    doc_retriever_intercreditor = vs

    index = VectorStoreIndex.from_documents(llama_parse_documents)
    query_engine = index.as_query_engine()

    print('Vector DB created successfully !')
    return doc_retriever_intercreditor, query_engine

#--------------

@st.cache_resource
def create_vector_database_LPA():
    # Call the function to either load or parse the data
    llama_parse_documents = load_or_parse_data_LPA()

    with open('data/output_LPA.md', 'a') as f:  # Open the file in append mode ('a')
        for doc in llama_parse_documents:
            f.write(doc.text + '\n')

    markdown_path = "data/output_LPA.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    documents = loader.load()
    # Split loaded documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=15)
    docs = text_splitter.split_documents(documents)

    print(f"length of documents loaded: {len(documents)}")
    print(f"total number of document chunks generated: {len(docs)}")
    persist_directory = "./chroma_db_LPA"  # Specify directory for Chroma persistence
    embed_model = HuggingFaceEmbeddings()

    vs = Chroma.from_documents(
        documents=docs,
        embedding=embed_model,
        collection_name="rag_LPA",
        persist_directory=persist_directory  # Ensure persistence
    )
    doc_retriever_LPA = vs

    index = VectorStoreIndex.from_documents(llama_parse_documents)
    query_engine = index.as_query_engine()

    print('Vector DB created successfully !')
    return doc_retriever_LPA, query_engine

#--------------

@st.cache_resource
def create_vector_database_ESG():
    # Call the function to either load or parse the data
    llama_parse_documents = load_or_parse_data_ESG()

    with open('data/output_ESG.md', 'a') as f:  # Open the file in append mode ('a')
        for doc in llama_parse_documents:
            f.write(doc.text + '\n')

    markdown_path = "data/output_ESG.md"
    loader = UnstructuredMarkdownLoader(markdown_path)
    documents = loader.load()
    # Split loaded documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=15)
    docs = text_splitter.split_documents(documents)

    print(f"length of documents loaded: {len(documents)}")
    print(f"total number of document chunks generated: {len(docs)}")
    persist_directory = "./chroma_db_ESG"  # Specify directory for Chroma persistence
    embed_model = HuggingFaceEmbeddings()

    vs = Chroma.from_documents(
        documents=docs,
        embedding=embed_model,
        collection_name="rag_ESG",
        persist_directory=persist_directory  # Ensure persistence
    )
    doc_retriever_ESG = vs

    index = VectorStoreIndex.from_documents(llama_parse_documents)
    query_engine = index.as_query_engine()

    print('Vector DB created successfully !')
    return doc_retriever_ESG, query_engine

#--------------
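Because each store sets persist_directory, it can in principle be reopened outside these functions from the chroma_db_* folders committed above; a minimal sketch, assuming the same default HuggingFace embedding model:

embed_model = HuggingFaceEmbeddings()
store = Chroma(
    collection_name="rag_company",
    embedding_function=embed_model,
    persist_directory="./chroma_db_company",
)
retriever = store.as_retriever(search_type="mmr", search_kwargs={"k": 5, "fetch_k": 10})
docs = retriever.invoke("board appointment and removal")  # example query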
legal_analysis_button_key = "legal_strategy_button"
#---------------
def delete_files_and_folders(folder_path):
    for root, dirs, files in os.walk(folder_path, topdown=False):
        for file in files:
            try:
                os.unlink(os.path.join(root, file))
            except Exception as e:
                st.error(f"Error deleting {os.path.join(root, file)}: {e}")
        for dir in dirs:
            try:
                os.rmdir(os.path.join(root, dir))
            except Exception as e:
                st.error(f"Error deleting directory {os.path.join(root, dir)}: {e}")
#---------------

if company_document:
    uploaded_files_company = st.sidebar.file_uploader("Choose company law documents", accept_multiple_files=True, key="company_files")
    for uploaded_file in uploaded_files_company:
        st.write("filename:", uploaded_file.name)
        def save_uploadedfile(uploadedfile):
            with open(os.path.join("Corporate_Documents", uploadedfile.name), "wb") as f:
                f.write(uploadedfile.getbuffer())
            return st.success("Saved File:{} to Corporate_Documents".format(uploadedfile.name))
        save_uploadedfile(uploaded_file)

if financial_document:
    uploaded_files_financials = st.sidebar.file_uploader("Choose financial law documents", accept_multiple_files=True, key="financial_files")
    for uploaded_file in uploaded_files_financials:
        st.write("filename:", uploaded_file.name)
        def save_uploadedfile(uploadedfile):
            with open(os.path.join("Financial_Documents", uploadedfile.name), "wb") as f:
                f.write(uploadedfile.getbuffer())
            return st.success("Saved File:{} to Financial_Documents".format(uploadedfile.name))
        save_uploadedfile(uploaded_file)

if intercreditor_document:
    uploaded_files_intercreditor = st.sidebar.file_uploader("Choose intercreditor documents", accept_multiple_files=True, key="intercreditor_files")
    for uploaded_file in uploaded_files_intercreditor:
        st.write("filename:", uploaded_file.name)
        def save_uploadedfile(uploadedfile):
            with open(os.path.join("Intercreditor_Documents", uploadedfile.name), "wb") as f:
                f.write(uploadedfile.getbuffer())
            return st.success("Saved File:{} to Intercreditor_Documents".format(uploadedfile.name))
        save_uploadedfile(uploaded_file)

if LPA_document:
    uploaded_files_LPA = st.sidebar.file_uploader("Choose LPA", accept_multiple_files=True, key="LPA_files")
    for uploaded_file in uploaded_files_LPA:
        st.write("filename:", uploaded_file.name)
        def save_uploadedfile(uploadedfile):
            with open(os.path.join("LPA", uploadedfile.name), "wb") as f:
                f.write(uploadedfile.getbuffer())
            return st.success("Saved File:{} to LPA".format(uploadedfile.name))
        save_uploadedfile(uploaded_file)

if ESG_document:
    uploaded_files_ESG = st.sidebar.file_uploader("Choose ESG document", accept_multiple_files=True, key="ESG_files")
    for uploaded_file in uploaded_files_ESG:
        st.write("filename:", uploaded_file.name)
        def save_uploadedfile(uploadedfile):
            with open(os.path.join("ESG", uploadedfile.name), "wb") as f:
                f.write(uploadedfile.getbuffer())
            return st.success("Saved File:{} to ESG".format(uploadedfile.name))
        save_uploadedfile(uploaded_file)
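Each of the five branches above redefines the same inline saver with only the target folder changing; a single parameterized helper would behave identically (a sketch, not part of the commit):

def save_uploaded_file(uploaded_file, target_dir):
    # Write the uploaded bytes into the target folder and report success.
    with open(os.path.join(target_dir, uploaded_file.name), "wb") as f:
        f.write(uploaded_file.getbuffer())
    return st.success("Saved File:{} to {}".format(uploaded_file.name, target_dir))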
#---------------
def company_strategy():
    doc_retriever_company, query_engine = create_vector_database_company()
    doc_retriever_company = doc_retriever_company.as_retriever()

    prompt_template = """<|system|>
    You are a seasoned attorney specializing in company law and legal analysis. You write expert analyses for institutional investors.
    Your answer should not exceed three paragraphs.
    The text should be technical legal text but easy to understand for a professional investor.
    Explain the actual contents of the clauses and sections relevant to the question.
    Include, at the end of the response, as a source the titles of the contract clauses from which the answer was obtained.
    Base your responses on the specific parts of the context document.<|end|>
    <|user|>
    Answer the {question} based on the information you find in context: {context} <|end|>
    <|assistant|>"""

    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])

    qa = (
        {
            "context": doc_retriever_company,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )

    #Corporate_answer_0 = qa.invoke("List the parties of the agreement and the business of the company? What categories of shares and shareholders are there? Are there conditions precedent to investment")
    Corporate_answer_0 = query_engine.query("List the parties of the agreement and the business of the company? What categories of shares and shareholders are there? Are there conditions precedent to investment")

    Corporate_answer_1 = qa.invoke("Describe the provisions governing nomination and removal of board members of the company?")

    Corporate_answer_2 = qa.invoke("Describe the company's share capital structure, including any provisions for different classes of shares and the rights attached to them. How are voting rights distributed among shareholders?")

    Corporate_answer_3 = qa.invoke("Summarize the procedures for decision-making in shareholder meetings and board meetings. Focus on decisions that require approval of some shareholders.")

    Corporate_answer_4 = qa.invoke("Summarize the provisions governing sale of shares, possible redemption rights, drag along and tag along rights and other exit situations of the shareholders.")

    Corporate_answer_5 = qa.invoke("Explain how and in what capacity new shareholders are admitted to the company. Does this require shareholder or board approval?")

    Corporate_answer_6 = qa.invoke("What mechanisms are in place for resolving shareholder disputes? Provide details on any arbitration or mediation clauses found in the company's articles or shareholders' agreements.")

    corporate_output = f"**__The Parties:__** {Corporate_answer_0} \n\n **__Director Appointment and Removal:__** {Corporate_answer_1} \n\n **__Share Capital Structure and Voting Rights:__** {Corporate_answer_2} \n\n **__Corporate Decisions:__** {Corporate_answer_3} \n\n **__Transfer of Shares:__** {Corporate_answer_4} \n\n **__Adherence of new shareholders:__** {Corporate_answer_5} \n\n **__Dispute Resolution:__** {Corporate_answer_6}"

    with open("company_analysis.txt", 'w') as file:
        file.write(corporate_output)

    return corporate_output

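The qa object above is a LangChain LCEL pipeline: the leading dict runs its values over the input (the retriever fills {context}, RunnablePassthrough forwards the raw question), and the result is piped through the prompt, the Groq chat model, and a string parser. The same pattern in isolation, with doc_retriever_company, prompt and llm assumed to be the objects defined above:

qa_sketch = (
    {"context": doc_retriever_company, "question": RunnablePassthrough()}
    | prompt              # formats {question} and {context} into the template
    | llm                 # the Groq-hosted chat model defined earlier
    | StrOutputParser()   # reduces the chat message to a plain string
)
# answer = qa_sketch.invoke("Who are the parties to the agreement?")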
def financial_strategy():
    doc_retriever_financial, query_engine = create_vector_database_financial()
    doc_retriever_financial = doc_retriever_financial.as_retriever()

    prompt_template = """<|system|> You are a seasoned attorney specializing in financial law and legal analysis. You write expert analyses for institutional investors.
    Give only specific details and contract clauses about the provided documents.
    Your answer should not exceed three paragraphs. The maximum number of sentences is twenty.
    The text should be technical legal text but easy to understand for a professional investor.
    Divide the output into paragraphs.
    Explain the legal contents of the clauses and sections relevant to the question.
    Include the titles of the contract clauses from which the information was obtained as a reference. Do not refer to the document as a whole but to specific clauses.
    Use other knowledge to supplement the contract terms and conditions only if absolutely necessary.<|end|>
    <|user|>
    Answer the {question} based on the information you find in context: {context} <|end|>
    <|assistant|>"""

    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])

    qa = (
        {
            "context": doc_retriever_financial,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )

    Financial_answer_0 = query_engine.query("Identify the parties involved in the loan, bond, or security agreements. What are the key obligations of the borrower or issuer under these agreements?")

    Financial_answer_1 = qa.invoke("Describe any financial covenants or ratios that must be maintained and the most important general covenants.")

    Financial_answer_3 = qa.invoke("What are the provisions governing events of default under the company's loan, bond, or security agreements? Include details on any cross-default or material adverse change clauses.")

    Financial_answer_4 = qa.invoke("Describe the rights of secured creditors under the security agreements. What types of collateral are secured, and what are the enforcement mechanisms in case of default?")

    Financial_answer_5 = qa.invoke("What acceleration clauses exist within the loan, bond, or security agreements? Under what conditions can creditors demand early repayment or terminate financing arrangements?")

    Financial_answer_6 = qa.invoke("Explain the procedures for enforcing security interests under the security agreements. How do the rights of secured creditors differ from those of unsecured creditors in such circumstances?")

    Financial_answer_7 = qa.invoke("How are decisions related to enforcement or restructuring prioritized among different classes of creditors under the loan, bond, or security agreements?")

    Financial_answer_8 = qa.invoke("Outline the company's obligations under any guarantees or indemnities provided to creditors in the loan, bond, or security agreements. Are there any limitations on the enforcement of these guarantees?")

    Financial_answer_9 = qa.invoke("What are the rights of bondholders or lenders under the bond issuance or loan agreements? How are creditor meetings conducted, and how can creditors exercise their rights in the event of default?")

    Financial_answer_10 = qa.invoke("What protections are in place for junior creditors or subordinated debt holders in the loan, bond, or security agreements? How are their rights affected in the event of enforcement or restructuring?")

    Financial_answer_11 = qa.invoke("What are the company's obligations to provide financial information to creditors under its loan, bond, or security agreements? How frequently must the company report, and what information is typically required?")

    financial_output = f"**__The parties and their key obligations:__** {Financial_answer_0} \n\n**__Borrower/Issuer Obligations and Covenants:__** {Financial_answer_1} \n\n **__Events of Default and Cross-Default Provisions:__** {Financial_answer_3} \n\n **__Rights of Secured Creditors and Enforcement of Security:__** {Financial_answer_4} \n\n **__Acceleration Clauses and Early Repayment Triggers:__** {Financial_answer_5} \n\n **__Enforcement of Security Interests:__** {Financial_answer_6} \n\n **__Intercreditor Decision-Making and Prioritization:__** {Financial_answer_7} \n\n **__Guarantees and Indemnities Obligations:__** {Financial_answer_8} \n\n **__Rights of Bondholders and Default Procedures:__** {Financial_answer_9} \n\n **__Protections for Junior Creditors:__** {Financial_answer_10} \n\n **__Financial Reporting Obligations to Creditors:__** {Financial_answer_11}"

    with open("financial_analysis.txt", 'w') as file:
        file.write(financial_output)

    return financial_output

929 |
+
def intercreditor_strategy():
|
930 |
+
doc_retriever_intercreditor, query_engine = create_vector_database_intercreditor()
|
931 |
+
doc_retriever_intercreditor = doc_retriever_intercreditor.as_retriever()
|
932 |
+
|
933 |
+
prompt_template = """<|system|>
|
934 |
+
"You are a seasoned attorney specializing in financial law and legal analysis.You write expert analyses for institutional investors.
|
935 |
+
Give only specific details and contract clauses about the provided documents.
|
936 |
+
Your answer should not exceed three paragraphs. The maximum number of sentences is twenty.
|
937 |
+
The text should be technical legal text but easy to understand for a professional investor.
|
938 |
+
Divide the output into paragraphs.
|
939 |
+
Explain the legal contents of the clauses and sections relevant to the question.
|
940 |
+
Include the source of the answer, including the titles of the contract clauses from which the information was obtained as a reference.
|
941 |
+
Use other knowledge to supplement the contract terms and conditions only if absolutely necessary.<|end|>
|
942 |
+
<|user|>
|
943 |
+
Answer the {question} based on the information you find in context: {context} <|end|>
|
944 |
+
<|assistant|>"""
|
945 |
+
|
946 |
+
prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])
|
947 |
+
|
948 |
+
qa = (
|
949 |
+
{
|
950 |
+
"context": doc_retriever_intercreditor,
|
951 |
+
"question": RunnablePassthrough(),
|
952 |
+
}
|
953 |
+
| prompt
|
954 |
+
| llm
|
955 |
+
| StrOutputParser()
|
956 |
+
)
|
957 |
+
|
958 |
+
Intercreditor_answer_1 = query_engine.query("Specify the parties to the intercreditor agreement, and what are their key roles, including senior and subordinated creditors, and security trustees or security agents?")
|
959 |
+
|
960 |
+
Intercreditor_answer_2 = qa.invoke("How is the ranking and priority of claims established among creditors under the intercreditor agreement? Describe the key clauses related to subordination and any waterfall or payment distribution.")
|
961 |
+
|
962 |
+
Intercreditor_answer_3 = qa.invoke("How are enforcement actions managed under the intercreditor agreement? What are the contractual clauses regulating appointing a lead enforcement agent. What clauses govern the coordination between senior and junior creditors handled during enforcement? How do the intercreditor agreement provisions handle enforcement blockages or restrictions on junior creditors?")
|
963 |
+
|
964 |
+
Intercreditor_answer_4 = qa.invoke("What are the standstill and turnover provisions under the agreement? Under what circumstances can subordinated or junior creditors be restricted from enforcing their rights, and when must they turn over proceeds to senior creditors?")
|
965 |
+
|
966 |
+
Intercreditor_answer_5 = qa.invoke("How do the intercreditor agreement provisions handle payment blockages or restrictions on junior creditors? What are the specific terms concerning limitations on junior creditors in relation to payment receipt during the enforcement period?")
|
967 |
+
|
968 |
+
Intercreditor_answer_6 = qa.invoke("What contractual dispute resolution mechanisms are established within the intercreditor agreement for resolving conflicts between senior and junior creditors, or other creditor groups?")
|
969 |
+
|
970 |
+
Intercreditor_answer_7 = qa.invoke("How does the intercreditor agreement address the distribution of enforcement proceeds? What are the priority rules for distributing recoveries, and how are they applied among different classes of creditors?")
|
971 |
+
|
972 |
+
Intercreditor_answer_8 = qa.invoke("What provisions govern amendments and waivers under the intercreditor agreement? How are decisions to amend key terms or waive rights made among the creditors, and what voting thresholds are required?")
|
973 |
+
|
974 |
+
Intercreditor_answer_9 = qa.invoke("What limitations or restrictions are imposed on junior creditors in insolvency or restructuring proceedings under the intercreditor agreement? Are there any specific conditions that prevent junior creditors from exercising their rights independently?")
|
975 |
+
|
976 |
+
Intercreditor_answer_10 = qa.invoke("What reporting or information-sharing obligations are outlined in the intercreditor agreement? How frequently must updates be provided, and what types of financial or operational information must be shared among creditor groups?")
|
977 |
+
|
978 |
+
|
979 |
+
intercreditor_output = f"**__Parties and Roles under the Intercreditor Agreement:__** {Intercreditor_answer_1} \n\n**__Ranking and Priority of Claims:__** {Intercreditor_answer_2} \n\n**__Enforcement Actions and Coordination Procedures:__** {Intercreditor_answer_3} \n\n**__Standstill and Turnover Provisions:__** {Intercreditor_answer_4} \n\n**__Payment Blockages and Restrictions on Junior Creditors:__** {Intercreditor_answer_5} \n\n**__Dispute Resolution and Conflict Management:__** {Intercreditor_answer_6} \n\n**__Distribution of Proceeds and Priority Rules:__** {Intercreditor_answer_7} \n\n**__Amendments and Waivers:__** {Intercreditor_answer_8} \n\n**__Restrictions on Junior Creditors in Insolvency or Restructuring:__** {Intercreditor_answer_9} \n\n **__Information-Sharing and Reporting Obligations:__** {Intercreditor_answer_10}"
|
980 |
+
|
981 |
+
with open("intercreditor_analysis.txt", 'w') as file:
|
982 |
+
file.write(intercreditor_output)
|
983 |
+
|
984 |
+
return intercreditor_output
|
985 |
+
|
986 |
+
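# How the LCEL pipeline above resolves its inputs (illustrative sketch): the dict step runs the
# retriever on the incoming string to fill {context}, while RunnablePassthrough() forwards the same
# string into {question}; the filled prompt is then piped through the LLM and the string output
# parser, e.g.:
#   answer = qa.invoke("Who acts as security agent under the intercreditor agreement?")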

def LPA_strategy():
    doc_retriever_LPA, query_engine = create_vector_database_LPA()
    doc_retriever_LPA = doc_retriever_LPA.as_retriever()

    prompt_template = """<|system|>
    You are a seasoned attorney specializing in financial law and legal analysis. You write expert analyses for institutional investors.
    Give only specific details and contract clauses about the provided documents.
    Your answer should not exceed three paragraphs. The maximum number of sentences is twenty.
    The text should be technical legal text but easy to understand for a professional investor.
    Divide the output into paragraphs. Quote the relevant part of the context text if needed.
    Explain the legal contents of the clauses and sections relevant to the question.
    Include the source of the answer, including the titles of the contract clauses from which the information was obtained as a reference.
    Use other knowledge to supplement the contract terms and conditions only if absolutely necessary.<|end|>
    <|user|>
    Answer the {question} based on the information you find in context: {context} <|end|>
    <|assistant|>"""

    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])

    qa = (
        {
            "context": doc_retriever_LPA,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )

    PE_answer_0 = query_engine.query("Who are the key parties to the Limited Partnership Agreement (LPA), such as the General Partner (GP), Limited Partners (LPs), and other relevant stakeholders?")
    PE_answer_1 = qa.invoke("What are the key obligations and responsibilities of the General Partner under the Limited Partnership Agreement? Include details on fiduciary duties, reporting obligations, and fund management responsibilities.")
    PE_answer_2 = qa.invoke("What are the key rights and restrictions of the Limited Partners under the Limited Partnership Agreement? Include details on withdrawal rights, transferability, and voting rights.")
    PE_answer_3 = qa.invoke("What are the management fees, carried interest arrangements, and other compensation structures outlined in the Limited Partnership Agreement or Fund Memorandum?")
    PE_answer_4 = qa.invoke("What provisions govern the investment restrictions and limitations under the Limited Partnership Agreement? Include details on diversification requirements, prohibited investments, and geographic or sectoral focus.")
    PE_answer_5 = qa.invoke("What are the provisions governing the distribution of profits and return of capital under the Limited Partnership Agreement? Include details on preferred returns, waterfalls, and clawback provisions.")
    PE_answer_6 = qa.invoke("What are the key risk factors and disclosures provided in the Fund Memorandum? Include details on market risk, liquidity risk, and conflicts of interest.")
    PE_answer_7 = qa.invoke("What are the provisions for resolving disputes among the General Partner, Limited Partners, or other stakeholders under the Limited Partnership Agreement?")
    PE_answer_8 = qa.invoke("What are the General Partner's rights and obligations in raising additional funds or successor funds? Include details on any restrictions or requirements under the Limited Partnership Agreement.")
    PE_answer_9 = qa.invoke("What are the reporting and disclosure obligations of the General Partner to the Limited Partners? Include details on financial reporting, capital account statements, and other periodic updates.")
    PE_answer_10 = qa.invoke("What are the terms and conditions for fund dissolution and winding up under the Limited Partnership Agreement? Include details on liquidation procedures and distribution priorities.")
    PE_answer_11 = qa.invoke("What provisions govern Limited Partner advisory committees or governance mechanisms within the fund? Include details on their powers, composition, and decision-making processes.")
    PE_answer_12 = qa.invoke("What key-man provisions does the limited partnership contain? Who are the specific persons obligated to manage the fund? How much time must they devote to the management of the fund?")

    pe_financial_output = f"**__Key Parties and Stakeholders:__** {PE_answer_0} \n\n**__General Partner Obligations and Responsibilities:__** {PE_answer_1} \n\n**__Limited Partner Rights and Restrictions:__** {PE_answer_2} \n\n**__Fees, Carried Interest, and Compensation:__** {PE_answer_3} \n\n**__Investment Restrictions and Limitations:__** {PE_answer_4} \n\n**__Profit Distributions and Clawback Provisions:__** {PE_answer_5} \n\n**__Risk Factors and Disclosures:__** {PE_answer_6} \n\n**__Dispute Resolution Mechanisms:__** {PE_answer_7} \n\n**__Fundraising and Successor Fund Obligations:__** {PE_answer_8} \n\n**__General Partner Reporting Obligations:__** {PE_answer_9} \n\n**__Fund Dissolution and Winding Up:__** {PE_answer_10} \n\n**__Limited Partner Advisory Committees and Governance:__** {PE_answer_11} \n\n**__Key-Man Provisions:__** {PE_answer_12}"

    with open("LPA_analysis.txt", 'w') as file:
        file.write(pe_financial_output)

    return pe_financial_output

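# A possible refactor (sketch only): the repeated qa.invoke calls in the strategy functions could be
# driven from a list of (heading, question) pairs, which keeps headings and questions paired in one
# place and builds the report in a loop, e.g.:
#   sections = [("Key Parties and Stakeholders", "Who are the key parties to the LPA?"), ...]
#   report = "\n\n".join(f"**__{title}:__** {qa.invoke(q)}" for title, q in sections)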

def ESG_strategy():
    doc_retriever_ESG, query_engine = create_vector_database_ESG()
    doc_retriever_ESG = doc_retriever_ESG.as_retriever()

    prompt_template = """<|system|>
    You are a seasoned specialist in environmental, social and governance matters.
    Always use figures, numerical and statistical data when possible.
    Your answer should not exceed three paragraphs. The maximum number of sentences is twenty.
    The text should be technical text but easy to understand for a professional investor.
    Divide the output into paragraphs.
    Include the source of the answer, including the titles of the relevant document from which the information was obtained as a reference.
    Use other knowledge to supplement the contract terms and conditions only if absolutely necessary.
    Quote the relevant part of the context text if needed.<|end|>
    <|user|>
    Answer the {question} based on the information you find in context: {context} <|end|>
    <|assistant|>"""

    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])

    qa = (
        {
            "context": doc_retriever_ESG,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )

    ESG_answer_1 = qa.invoke("Give a summary of the specific ESG measures the company has taken recently and compare these to best practices.")
    ESG_answer_2 = qa.invoke("Does the company's main business fall under the European Union's taxonomy regulation? Answer whether the company is taxonomy compliant under the European Union Taxonomy Regulation.")
    ESG_answer_3 = qa.invoke("Describe what specific ESG transparency commitments the company has given. Give details on how the company has followed the Paris Agreement's obligation to limit global warming to 1.5 degrees Celsius.")
    ESG_answer_4 = qa.invoke("Does the company have a carbon emissions reduction plan? Has the company reached its carbon dioxide reduction objectives? Set the company's carbon footprint by location and its development, or equivalent figures, in a table. List carbon dioxide emissions compared to turnover.")
    ESG_answer_5 = qa.invoke("Describe and set out in a table the following specific information: (i) Scope 1 CO2 emissions, (ii) Scope 2 CO2 emissions, and (iii) Scope 3 CO2 emissions of the company for 2021, 2022 and 2023. List the material changes relating to these figures.")
    ESG_answer_6 = qa.invoke("List in a table the company's energy and renewable energy usage for each material activity. Explain the main energy efficiency measures taken by the company.")
    ESG_answer_7 = qa.invoke("Does the company follow the UN Guiding Principles on Business and Human Rights, the ILO Declaration on Fundamental Principles and Rights at Work, or the OECD Guidelines for Multinational Enterprises that involve affected communities?")
    ESG_answer_8 = qa.invoke("List the environmental permits and certifications held by the company. Set out and explain any environmental procedures, investigations, and decisions taken against the company. Answer whether the company's locations or operations are connected to areas sensitive in relation to biodiversity.")
    ESG_answer_9 = qa.invoke("Set out the waste management practices used by the company and any possible waste discharged into the soil. Describe whether the company's real estate holds hazardous waste.")
    ESG_answer_10 = qa.invoke("What percentage of women is represented on the (i) board, (ii) among executive directors, and (iii) in upper management? Set out the measures taken to achieve gender balance in the upper management of the company.")
    ESG_answer_11 = qa.invoke("What policies has the company implemented to counter money laundering and corruption?")

    ESG_output = f"**__Summary of ESG reporting and obligations:__** {ESG_answer_1} \n\n**__Compliance with taxonomy:__** \n\n {ESG_answer_2} \n\n**__Disclosure transparency:__** \n\n {ESG_answer_3} \n\n**__Carbon footprint:__** \n\n {ESG_answer_4} \n\n**__Carbon dioxide emissions:__** \n\n {ESG_answer_5} \n\n**__Renewable energy:__** \n\n {ESG_answer_6} \n\n**__Human rights compliance:__** \n\n {ESG_answer_7} \n\n**__Environmental permits and biodiversity:__** \n\n {ESG_answer_8} \n\n**__Waste and other emissions:__** {ESG_answer_9} \n\n**__Gender equality:__** {ESG_answer_10} \n\n**__Anti-money laundering:__** {ESG_answer_11}"

    with open("ESG_analysis.txt", 'w') as file:
        file.write(ESG_output)

    return ESG_output

#-------------
@st.cache_data
def generate_strategy() -> str:
    strategic_output = ""

    # Check which document exists and assign the respective strategy output to strategic_output
    if company_document:
        strategic_output = company_strategy()
    elif financial_document:
        strategic_output = financial_strategy()
    elif intercreditor_document:
        strategic_output = intercreditor_strategy()
    elif LPA_document:
        strategic_output = LPA_strategy()
    elif ESG_document:
        strategic_output = ESG_strategy()

    # Set the combined result in a single session state key
    st.session_state.results["legal_analysis_button_key"] = strategic_output

    return strategic_output
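# Note on caching (sketch): st.cache_data keys its cache on the function's arguments, and
# generate_strategy() takes none, so a previously cached analysis is reused until "Clear All"
# calls st.cache_data.clear(). Passing the document type explicitly would let the cache
# invalidate on its own, e.g.:
#   @st.cache_data
#   def generate_strategy(doc_type: str) -> str: ...
#   generate_strategy("financial")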
#---------------
#@st.cache_data

# Function to remove paragraphs (blank lines) from a text file
def remove_paragraphs(input_file, output_file):
    """Remove blank lines from input_file and save the cleaned content to output_file."""
    try:
        with open(input_file, 'r', encoding='utf-8') as infile, open(output_file, 'w', encoding='utf-8') as outfile:
            for line in infile:
                # Keep only lines that contain non-whitespace content
                if line.strip():
                    outfile.write(line)
    except Exception as e:
        print(f"Error processing {input_file}: {e}")

def create_pdf():
    from fpdf import FPDF
    import os
    import re

    class PDF(FPDF):
        pass  # Add custom functionality here if needed

    # Define the possible files
    files = [
        "company_analysis.txt",
        "financial_analysis.txt",
        "intercreditor_analysis.txt",
        "LPA_analysis.txt",
        "ESG_analysis.txt"
    ]

    # Check which file exists and set input_file accordingly
    input_file = None   # Default to None, in case no file exists
    output_file = None  # Default to None, in case no file exists

    for file in files:
        if os.path.exists(file):
            input_file = file  # Set the input_file to the first file that exists
            output_file = 'legal_analysis_no_paragraphs.txt'  # Set output_file when input_file is found
            break  # Exit the loop once the first matching file is found

    if input_file and output_file:
        # Remove paragraphs from the selected file
        remove_paragraphs(input_file, output_file)

        # Create the PDF document
        pdf = PDF()
        pdf.add_page()
        pdf.set_margins(14, 14, 14)

        # Use the built-in "Helvetica" font
        pdf.set_font("Helvetica", size=14, style='B')

        # Title of the PDF
        pdf.cell(0, 10, txt="Structured Document Analysis", ln=2, align='C')
        pdf.ln(4)
        pdf.line(14, pdf.get_y(), 190, pdf.get_y())

        # Content
        pdf.set_font("Helvetica", size=11)

        # Define regex to match bold and heading patterns
        heading_pattern = r"\*\*__(.*?)__\*\*"  # Matches **__heading__**
        bold_pattern = r"\*\*(.*?)\*\*"         # Matches **bold text**

        try:
            with open(output_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()

                    # Replace problematic Unicode characters
                    replacements = {
                        "₁": "1",      # Subscript 1
                        "₂": "2",      # Subscript 2
                        "₃": "3",      # Subscript 3
                        "✓": "Check",  # Checkmark
                        "€": "EUR"     # Euro symbol
                        # Add more replacements as needed
                    }
                    for char, replacement in replacements.items():
                        line = line.replace(char, replacement)

                    # Split the line into parts and apply the correct formatting
                    parts = re.split(r'(\*\*__.*?__\*\*|\*\*.*?\*\*)', line)  # Split on headings or bolds
                    for part in parts:
                        if re.match(heading_pattern, part):  # If part is a heading
                            content = re.sub(heading_pattern, r'\1', part)  # Remove the **__ and __**
                            pdf.set_font("Helvetica", size=12, style='B')  # Larger font for heading
                            pdf.ln(1)
                            pdf.multi_cell(0, 5, txt=content, align='L')
                            pdf.ln(1)
                        elif re.match(bold_pattern, part):  # If part is bold text (convert to sub-heading)
                            content = re.sub(bold_pattern, r'\1', part)  # Remove ** for bold
                            # Use multi_cell for bold text as a sub-heading; it will wrap the text
                            pdf.set_font("Helvetica", size=11, style='B')  # Larger font for sub-heading
                            pdf.ln(1)
                            pdf.multi_cell(0, 5, txt=content, align='L')  # Multi-cell prevents overflow
                            pdf.ln(1)  # Add some space after the sub-heading
                        else:
                            # Regular text
                            pdf.set_font("Helvetica", size=11)
                            pdf.ln(1)
                            pdf.multi_cell(0, 5, txt=part, align='L')
                            pdf.ln(1)

        except UnicodeEncodeError:
            print("UnicodeEncodeError: Some characters could not be encoded. Skipping...")
            pass  # Skip problematic lines

        # Save the PDF
        output_pdf_path = "Document_analysis.pdf"
        pdf.output(output_pdf_path)
    else:
        print("No valid input file found.")
        # Handle the case where no valid file exists

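# Behaviour of the re.split call above (illustrative): because the pattern is wrapped in a
# capturing group, the matched heading/bold spans are kept in the result, e.g.:
#   re.split(r'(\*\*__.*?__\*\*|\*\*.*?\*\*)', "**__Carbon footprint:__** fell by 12%")
#   -> ['', '**__Carbon footprint:__**', ' fell by 12%']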
#----------------
if 'results' not in st.session_state:
    st.session_state.results = {
        "legal_analysis_button_key": {}
    }

loaders = {'.pdf': PyMuPDFLoader,
           '.xml': UnstructuredXMLLoader,
           '.csv': CSVLoader,
           }

def create_directory_loader(file_type, directory_path):
    return DirectoryLoader(
        path=directory_path,
        glob=f"**/*{file_type}",
        loader_cls=loaders[file_type],
    )

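# Illustrative usage sketch: build a loader for every PDF in one of the upload folders
#   pdf_loader = create_directory_loader('.pdf', "Corporate_Documents")
#   documents = pdf_loader.load()  # list of LangChain Document objects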
#---------------
strategies_container = st.container()
with strategies_container:
    mrow1_col1, mrow1_col2 = st.columns(2)

    st.sidebar.info("To get started, please upload the documents from the company you would like to analyze.")
    button_container = st.sidebar.container()
    if os.path.exists("company_analysis.txt") or os.path.exists("financial_analysis.txt") or os.path.exists("intercreditor_analysis.txt") or os.path.exists("LPA_analysis.txt") or os.path.exists("ESG_analysis.txt"):
        create_pdf()
        with open("Document_analysis.pdf", "rb") as pdf_file:
            PDFbyte = pdf_file.read()

        st.sidebar.download_button(label="Download Analysis",
                                   data=PDFbyte,
                                   file_name="Document Analysis.pdf",
                                   mime='application/octet-stream',
                                   )

    if button_container.button("Clear All"):

        # Reset both analysis flags in a single assignment so neither is lost
        st.session_state.button_states = {
            "legal_analysis_button_key": False,
            "financial_analysis_button_key": False,
        }
        st.session_state.results = {}

        st.session_state['history'] = []
        st.session_state['generated'] = ["Let's discuss the company documents 🤗"]
        st.session_state['past'] = ["Hey ! 👋"]
        st.cache_data.clear()
        st.cache_resource.clear()

        # List of files to delete
        files_to_delete = [
            "company_analysis.txt",
            "financial_analysis.txt",
            "intercreditor_analysis.txt",
            "LPA_analysis.txt",
            "ESG_analysis.txt"
        ]

        # Loop through each file and try to delete it
        for file_name in files_to_delete:
            if os.path.exists(file_name):
                try:
                    os.unlink(file_name)  # Delete the file
                    st.success(f"Successfully deleted {file_name}")
                except Exception as e:
                    st.error(f"Error deleting {file_name}: {e}")
            else:
                st.warning(f"{file_name} not found, skipping...")

        # Empty every upload folder; missing folders are silently skipped
        folders_to_clear = [
            "Corporate_Documents",
            "data",
            "Financial_Documents",
            "Intercreditor_Documents",
            "LPA",
            "ESG"
        ]
        for folder in folders_to_clear:
            if os.path.exists(folder):
                for filename in os.listdir(folder):
                    file_path = os.path.join(folder, filename)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                    except Exception as e:
                        st.error(f"Error deleting {file_path}: {e}")
            else:
                pass

    with mrow1_col1:
        st.subheader("Asset Management Document Analysis")
        st.info("This tool is designed to provide a legal analysis of the documentation for institutional investors.")

        button_container2 = st.container()
        if "button_states" not in st.session_state:
            st.session_state.button_states = {
                "legal_analysis_button_key": False,
            }

        if "results" not in st.session_state:
            st.session_state.results = {}

        if button_container2.button("Legal Analysis", key=legal_analysis_button_key):
            st.session_state.button_states[legal_analysis_button_key] = True
            result_generator = generate_strategy()  # Run the cached analysis function
            st.session_state.results["legal_analysis_output"] = result_generator

        if "legal_analysis_output" in st.session_state.results:
            st.markdown(st.session_state.results["legal_analysis_output"])

        st.divider()

    with mrow1_col2:
        if "legal_analysis_button_key" in st.session_state.results and st.session_state.results["legal_analysis_button_key"]:

            run_id = str(uuid.uuid4())

            scratchpad = {
                "questions": [],  # list of type Question
                "answerpad": [],
            }

            embed_model = HuggingFaceEmbeddings()

            vs_company = Chroma(
                persist_directory="./chroma_db_company",  # Directory for persistent storage
                collection_name="rag_company",
                embedding_function=embed_model,
            )
            vs_financial = Chroma(
                persist_directory="./chroma_db_financial",
                collection_name="rag_financial",
                embedding_function=embed_model,
            )
            vs_intercreditor = Chroma(
                persist_directory="./chroma_db_intercreditor",
                collection_name="rag_intercreditor",
                embedding_function=embed_model,
            )
            vs_LPA = Chroma(
                persist_directory="./chroma_db_LPA",
                collection_name="rag_LPA",
                embedding_function=embed_model,
            )
            vs_ESG = Chroma(
                persist_directory="./chroma_db_ESG",
                collection_name="rag_ESG",
                embedding_function=embed_model,
            )

            # Pick the vector store that matches the uploaded document type
            if company_document:
                store = vs_company
            elif financial_document:
                store = vs_financial
            elif intercreditor_document:
                store = vs_intercreditor
            elif LPA_document:
                store = vs_LPA
            elif ESG_document:
                store = vs_ESG
            else:
                store = None

            agent_settings = {
                "max_iterations": 3,
                "num_atomistic_questions": 2,
                "num_questions_per_iteration": 4,
                "question_atomizer_temperature": 0,
                "question_creation_temperature": 0.4,
                "question_prioritisation_temperature": 0,
                "refine_answer_temperature": 0,
                "qa_temperature": 0,
                "analyser_temperature": 0,
                "intermediate_answers_length": 200,
                "answer_length": 500,
            }

            # Updated prompt templates to include chat history
            def format_chat_history(chat_history):
                """Format chat history as a single string for input to the chain."""
                formatted_history = "\n".join([f"User: {entry['input']}\nAI: {entry['output']}" for entry in chat_history])
                return formatted_history
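            # Example of what format_chat_history produces (illustrative values):
            #   format_chat_history([{"input": "Who are the lenders?", "output": "The lenders are..."}])
            #   -> "User: Who are the lenders?\nAI: The lenders are..."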

            # Initialize the agent with LCEL tools and memory
            memory = ConversationBufferMemory(memory_key="chat_history", k=3, return_messages=True)
            agent = Agent(agent_settings, scratchpad, store, True)

            def conversational_chat(query):
                # Get the result from the agent
                agent.run({"input": query, "chat_history": st.session_state['history']})

                result = agent.get_latest_answer()

                # Handle different response types
                if isinstance(result, dict):
                    # Extract the main content if the result is a dictionary
                    result = result.get("output", "")  # Adjust the key as needed based on your agent's output
                elif isinstance(result, list):
                    # If the result is a list, join it into a single string
                    result = "\n".join(result)
                elif not isinstance(result, str):
                    # Convert the result to a string if it is not already one
                    result = str(result)

                # Add the query and the result to the session state
                st.session_state['history'].append((query, result))

                # Update memory with the conversation
                memory.save_context({"input": query}, {"output": result})

                # Return the result
                return result

            # Ensure session states are initialized
            if 'history' not in st.session_state:
                st.session_state['history'] = []

            if 'generated' not in st.session_state:
                st.session_state['generated'] = ["Let's discuss the legal and financial matters 🤗"]

            if 'past' not in st.session_state:
                st.session_state['past'] = ["Hey ! 👋"]

            if 'input' not in st.session_state:
                st.session_state['input'] = ""

            # Streamlit layout
            st.subheader("Discuss the documentation")
            st.info("This document research assistant enables you to discuss the legal documentation.")
            response_container = st.container()
            container = st.container()

            with container:
                with st.form(key='my_form'):
                    user_input = st.text_input("Query:", placeholder="What would you like to know about the documentation", key='input')
                    submit_button = st.form_submit_button(label='Send')
                    if submit_button and user_input:
                        output = conversational_chat(user_input)
                        st.session_state['past'].append(user_input)
                        st.session_state['generated'].append(output)
                        user_input = "Query:"
                        #st.session_state['input'] = ""

            # Display generated responses
            if st.session_state['generated']:
                with response_container:
                    for i in range(len(st.session_state['generated'])):
                        message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="shapes")
                        message(st.session_state["generated"][i], key=str(i), avatar_style="icons")

chains_v2/.gitkeep
ADDED
File without changes
chains_v2/__init__.py
ADDED
File without changes
chains_v2/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (215 Bytes). View file
chains_v2/__pycache__/create_questions.cpython-311.pyc
ADDED
Binary file (2.24 kB). View file
chains_v2/__pycache__/most_pertinent_question.cpython-311.pyc
ADDED
Binary file (1.67 kB). View file
chains_v2/__pycache__/question_atomizer.cpython-311.pyc
ADDED
Binary file (2.09 kB). View file
chains_v2/__pycache__/refine_answer.cpython-311.pyc
ADDED
Binary file (1.89 kB). View file
chains_v2/__pycache__/research_compiler.cpython-311.pyc
ADDED
Binary file (1.77 kB). View file
chains_v2/__pycache__/retrieval_qa.cpython-311.pyc
ADDED
Binary file (2.17 kB). View file
chains_v2/create_questions.py
ADDED
@@ -0,0 +1,40 @@
from langchain.llms import BaseLLM
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class QuestionCreationChain(LLMChain):
    """Chain that generates follow-up questions."""

    # from_llm is an alternative constructor: it builds the chain from the prompt template below
    # and the supplied LLM, so callers never construct the PromptTemplate themselves.
    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        questions_creation_template = (
            "You are a part of a team. The ultimate goal of your team is to"
            " answer the following Question: '{question}'.\n"
            "Your team has discovered some new text (delimited by ```) that may be relevant to your ultimate goal."
            " text: \n ``` {context} ``` \n"
            "Your task is to ask new questions that may help your team achieve the ultimate goal."
            " If you think that the text is relevant to your ultimate goal, then ask new questions."
            " New questions should be based only on the text and the goal Question and no other previous knowledge."
            " The new questions should have no semantic overlap with questions in the following list:\n"
            " {previous_questions}\n"
            "You can ask up to {num_questions} new questions."
            " Return the questions as a comma separated list. "
            " Format your response as a numbered list of questions, like:\n"
            "n. First question\n"
            "n. Second question\n"
            "Start the list with number {start_id}"
        )

        prompt = PromptTemplate(
            template=questions_creation_template,
            input_variables=[
                "question",
                "context",
                "previous_questions",
                "num_questions",
                "start_id",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
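# Illustrative usage sketch (assumes an `llm` instance is already configured):
#   chain = QuestionCreationChain.from_llm(llm)
#   new_questions = chain.run(
#       question="What are the borrower's reporting obligations?",
#       context="...retrieved contract text...",
#       previous_questions="1. Who are the parties?",
#       num_questions=4,
#       start_id=2,
#   )
# The raw completion can then be parsed with result2QuestionsList from helpers/response_helpers.py.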
chains_v2/most_pertinent_question.py
ADDED
@@ -0,0 +1,29 @@
from langchain.llms import BaseLLM
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class MostPertinentQuestion(LLMChain):
    """Chain that picks the single most relevant question from a list of unanswered questions."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        question_prioritization_template = (
            "You are provided with the following list of questions:"
            " {unanswered_questions} \n"
            " Your task is to choose one question from the above list"
            " that is the most pertinent to the following query:\n"
            " '{original_question}' \n"
            " Respond with one question out of the provided list of questions."
            " Return the question as it is without any edits."
            " Format your response like:\n"
            " #. question"
        )
        prompt = PromptTemplate(
            template=question_prioritization_template,
            input_variables=["unanswered_questions", "original_question"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
chains_v2/question_atomizer.py
ADDED
@@ -0,0 +1,34 @@
from langchain.llms import BaseLLM
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class QuestionAtomizer(LLMChain):
    """
    This chain splits the original question into a set of atomistic sub-questions.
    """

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        question_atomizer_template = (
            " You are provided with the following question:"
            " '{question}' \n"
            " Your task is to split the given question into at most {num_questions} very"
            " simple, basic and atomistic sub-questions (only if needed) using only the"
            " information given in the question and no other prior knowledge."
            " The sub-questions should be directly related to the intent of the original question."
            " Consider the primary subject and the predicate of the question (if any) when creating sub-questions.\n"
            " Consider also the Parties, Rights, Obligations, Remedies, Actions, or Events mentioned"
            " in the question (if any) when creating the sub-questions.\n"
            " The sub-questions should have no semantic overlap with each other."
            " Format your response like: \n"
            " n. question"
        )
        prompt = PromptTemplate(
            template=question_atomizer_template,
            input_variables=["question", "num_questions"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
chains_v2/refine_answer.py
ADDED
@@ -0,0 +1,30 @@
from langchain.llms import BaseLLM
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class RefineAnswer(LLMChain):
    """
    This chain refines the answer with every iteration.
    """

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        prompt_template = (
            "Your task is to answer the following question.\n"
            " Question: '{question}'\n"
            " You are provided with an existing Answer: \n---\n{answer}\n---\n\n"
            " You are also provided with some additional context that may be relevant to the question.\n"
            " New Context: \n---\n{context}\n---\n\n"
            " You have the opportunity to rewrite and improve upon the existing answer."
            " Use only the information from the existing answer and the given context to write a better answer."
            " Use a descriptive style and a formal, business, finance and legal language."
            " If the context isn't useful, return the existing answer."
        )
        prompt = PromptTemplate(
            template=prompt_template,
            input_variables=["question", "answer", "context"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
chains_v2/research_compiler.py
ADDED
@@ -0,0 +1,41 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

def research_compiler(llm, question: str, notes: str, answer_length: int, verbose: bool = True):
    # prompt_template = (
    #     "You are a researcher. Your task is to answer the following question\n"
    #     " Question: '{question}' \n"
    #     " You are provided with some notes (delimited between '___')"
    #     " ___\n{notes}\n ___\n"
    #     " The notes include answers to several questions that may be relevant to the original question."
    #     " Use only the information from the notes that is most pertinent to the question."
    #     " Write the answer solely based on the given notes and no other previous knowledge."
    #     " Answer should be clear, crisp and detailed."
    #     " Write your answer in less than {answer_length} words."
    #     " Answer :")
    prompt_template = (
        "You are a research agent who answers complex questions with clear, formal and detailed answers."
        " You are provided with a question and some research notes prepared by your team."
        " Question: {question} \n"
        " Notes: {notes} \n"
        " Your task is to answer the question entirely based on the given notes."
        " The notes contain a list of intermediate questions and answers that may be helpful to you in writing an answer."
        " Use only the most relevant information from the notes while writing your answer."
        " Do not use any prior knowledge while writing your answer. Do not make up the answer."
        " If the notes are not relevant to the question, just return 'Context is insufficient to answer the question'."
        " Remember your goal is to answer the question as objectively as possible."
        " Write your answer succinctly in less than {answer_length} words."
    )

    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["notes", "question", "answer_length"]
    )

    chain = LLMChain(
        llm=llm,
        prompt=PROMPT,
        verbose=verbose,
    )

    result = chain({"question": question, "notes": notes, "answer_length": answer_length})
    return result
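# Illustrative usage sketch (assumes `llm` and accumulated research `notes` already exist):
#   result = research_compiler(
#       llm=llm,
#       question="What are the key obligations of the General Partner?",
#       notes="1. Who is the GP? -> ...\n2. What are the GP's fiduciary duties? -> ...",
#       answer_length=300,
#   )
#   print(result["text"])  # LLMChain returns its completion under the "text" key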
chains_v2/retrieval_qa.py
ADDED
@@ -0,0 +1,41 @@
# from langchain.llms import BaseLLM
# from langchain.base_language import BaseLanguageModel
# from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.vectorstores import PGVector
from langchain.chains import RetrievalQA

def retrieval_qa(llm, retriever: PGVector, question: str, answer_length: int = 250, verbose: bool = True):
    """
    This chain is used to answer the intermediate questions.
    """
    prompt_answer_length = f" Answer as succinctly as possible in less than {answer_length} words.\n"

    prompt_template = \
        "You are provided with a question and some helpful context to answer the question \n" \
        " Question: {question}\n" \
        " Context: {context}\n" \
        "Your task is to answer the question based on the information given in the context." \
        " The answer must be based on the context and no other previous knowledge or information should be used." \
        " Your answer should not exceed three paragraphs. The maximum number of sentences is 15." \
        " The text should be technical legal text but easy to understand for a professional investor." \
        " Divide the output into paragraphs." \
        " Include the source of the information, including the clauses from which the information was obtained, as a reference in the format example (source: Clause 3.15)." \
        " If the context provided is empty or irrelevant, just return 'Context not sufficient'" \
        + prompt_answer_length

    PROMPT = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )

    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={"prompt": PROMPT},
        verbose=verbose,
    )

    result = qa_chain({"query": question})
    return result['result'], result['source_documents']
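# Illustrative usage sketch (assumes `llm` is configured and `store` is a vector store exposing .as_retriever()):
#   answer, sources = retrieval_qa(
#       llm=llm,
#       retriever=store.as_retriever(),
#       question="What events of default are defined in the facility agreement?",
#       answer_length=200,
#   )
#   `answer` is the model's reply; `sources` is the list of retrieved Documents backing it.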
chroma_db_ESG/.gitkeep
ADDED
File without changes
chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/header.bin
ADDED
Binary file (100 Bytes). View file
chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/index_metadata.pickle
ADDED
Binary file (56 kB). View file
chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/length.bin
ADDED
Binary file (4 kB). View file
chroma_db_ESG/dc0baf8b-e7c0-4a55-a6f6-467be672f6a8/link_lists.bin
ADDED
Binary file (8.15 kB). View file
chroma_db_LPA/.gitkeep
ADDED
File without changes
chroma_db_LPA/chroma.sqlite3
ADDED
Binary file (147 kB). View file
chroma_db_company/.gitkeep
ADDED
File without changes
chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/header.bin
ADDED
Binary file (100 Bytes). View file
chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/length.bin
ADDED
Binary file (4 kB). View file
chroma_db_company/13cf8d6e-525d-4494-89ff-c587e1899359/link_lists.bin
ADDED
File without changes
chroma_db_financial/.gitkeep
ADDED
File without changes
chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/header.bin
ADDED
Binary file (100 Bytes). View file
chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/index_metadata.pickle
ADDED
Binary file (172 kB). View file
chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/length.bin
ADDED
Binary file (12 kB). View file
chroma_db_financial/0828d60d-31b5-4f35-b9ff-277c4ac46717/link_lists.bin
ADDED
Binary file (25.7 kB). View file
chroma_db_intercreditor/.gitkeep
ADDED
File without changes
chroma_db_intercreditor/chroma.sqlite3
ADDED
Binary file (147 kB). View file
data/.gitkeep
ADDED
File without changes
helpers/.gitkeep
ADDED
File without changes
helpers/__pycache__/questions_helper.cpython-311.pyc
ADDED
Binary file (2.8 kB). View file
helpers/__pycache__/response_helpers.cpython-311.pyc
ADDED
Binary file (2.55 kB). View file
helpers/questions_helper.py
ADDED
@@ -0,0 +1,23 @@
# Small helpers for filtering and updating the question dictionaries kept on the agent's scratchpad
getAnsweredQuestions = lambda questions: [
    q for q in questions if q["status"] == "answered"
]
getUnansweredQuestions = lambda questions: [
    q for q in questions if q["status"] == "unanswered"
]
getSubQuestions = lambda questions: [q for q in questions if q["type"] == "subquestion"]
getHopQuestions = lambda questions: [q for q in questions if q["type"] == "hop"]
getLastQuestionId = lambda questions: max([q["id"] for q in questions])


def markAnswered(questions, id: int):
    for q in questions:
        if q["id"] == id:
            q["status"] = "answered"


def getQuestionById(questions, id: int):
    q = [q for q in questions if q["id"] == id]
    if len(q) == 0:
        return None
    return q[0]
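# Illustrative usage sketch with the question dictionaries produced by result2QuestionsList:
#   questions = [
#       {"id": 1, "question": "Who are the parties?", "type": "subquestion", "status": "unanswered", "answer": None},
#       {"id": 2, "question": "What is the governing law?", "type": "hop", "status": "unanswered", "answer": None},
#   ]
#   markAnswered(questions, 1)
#   getUnansweredQuestions(questions)   # -> [the question with id 2]
#   getLastQuestionId(questions)        # -> 2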
helpers/response_helpers.py
ADDED
@@ -0,0 +1,45 @@
'''def qStr2Dict(question: str) -> dict:
    print('qStr2Dict :', question)
    split = question.strip(" '\"").split(".", 1)
    question_dict = {'id': int(split[0]), 'question': split[-1].strip()}
    return question_dict


def result2QuestionsList(question_response: str, type: str, status: str) -> list:
    response_splits = question_response.split("\n")
    qlist = []
    for q in response_splits:
        question = {**qStr2Dict(q), 'type': type, 'status': status, 'answer': None}
        qlist = qlist + [question]
    return qlist
'''
def qStr2Dict(question: str) -> dict:
    try:
        print('qStr2Dict:', question)
        split = question.strip(" '\"").split(".", 1)

        # If the prefix is not a number (e.g. "Here are the sub-questions:"), fall back to id 1;
        # otherwise parse the numeric id in front of the question text
        id_part = split[0].replace("#", "").strip()
        id_part = int(id_part) if id_part.isdigit() else 1

        question_dict = {'id': id_part, 'question': split[-1].strip()}
        return question_dict
    except ValueError as e:
        print("Error:", e)
        return {'id': None, 'question': question.strip()}

def result2QuestionsList(question_response: str, type: str, status: str) -> list:
    response_splits = question_response.split("\n")
    qlist = []
    for q in response_splits:
        # Skip empty lines
        if q.strip() == '':
            continue
        question = qStr2Dict(q)
        question.update({'type': type, 'status': status, 'answer': None})
        qlist.append(question)
    return qlist
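# Illustrative usage sketch: parsing a numbered list returned by one of the question chains
#   response = "1. Who are the senior creditors?\n2. What is the ranking of claims?"
#   result2QuestionsList(response, type="subquestion", status="unanswered")
#   -> [{'id': 1, 'question': 'Who are the senior creditors?', 'type': 'subquestion', 'status': 'unanswered', 'answer': None},
#       {'id': 2, 'question': 'What is the ranking of claims?', 'type': 'subquestion', 'status': 'unanswered', 'answer': None}]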