nmurugesh committed on
Commit b66ee3d · verified · 1 Parent(s): 7f8e771

Upload 3 files

Files changed (3)
  1. app.py +1019 -0
  2. ind_nifty50list.csv +51 -0
  3. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,1019 @@
+ # !pip install langchain langchain-groq sentence-transformers langchainhub faiss-cpu gradio gradio_client yfinance duckduckgo-search
+
+ import pandas as pd
+ import io
+ import requests
+ import os
+ import json
+ import matplotlib.pyplot as plt
+ from datetime import datetime, timedelta
+ from bs4 import BeautifulSoup
+ import yfinance as yf
+ import ast
+ import re
+ import pytz
+
+
+ # import langchain libraries
+ # !pip install langchain langchain-groq langchainhub duckduckgo-search
+ from langchain.agents import AgentExecutor, Tool, create_tool_calling_agent
+ from langchain.agents import create_react_agent
+ from langchain.agents import create_structured_chat_agent
+ from langchain import hub
+ from langchain_groq import ChatGroq
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.prompts import PromptTemplate
+ from langchain_community.tools import DuckDuckGoSearchResults
+ from langchain_community.tools import DuckDuckGoSearchRun
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain.chains import create_retrieval_chain
+ from langchain.chains import RetrievalQA
+ from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
+ from langchain_community.document_loaders.csv_loader import CSVLoader
+ from langchain_core.output_parsers import JsonOutputParser
+
+ #import gradio libraries
+ # !pip install gradio gradio_client
+ import gradio as gr
+
+
+ #import vectorstore libraries
+ # !pip install faiss-cpu
+ from langchain_community.vectorstores import FAISS
+ embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
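+ # Note: "all-MiniLM-L6-v2" is a small sentence-transformer (384-dimensional embeddings), so the
+ # FAISS indexes built below stay cheap to construct on CPU for the few hundred filing rows fetched per day.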
53
+
54
+
55
+ ############################################
+ ############################################
+ # Code steps involved:
+ # 1. Define the LLM
+ # 2. Extract data from NSE
+ # 3. Process the dataframes and store them as CSV files
+ # 4. Use Langchain CSV Loaders to load the CSV data
+ # 5. Create vector stores
+ # 6. Create company lists
+ # 7. Create the LLM functions required
+ # 8. Create the Python functions for stock data and charting
+ # 9. Create Gradio Blocks
+ # 10. Find any recent real-time addition to NSE data and add it to the vector stores
+ # 11. Create retrievers and Langchain QA retrieval chains
+ # 12. Define default charts
+ # 13. Gradio app
+ ##########################################
+ ##########################################
73
+
74
+
75
+ # Define the LLM - We shall use ChatGroq of Groq Platform and LLama70B
76
+ # This llm definition is redundant as now models will be chosen by user
77
+ # llm = ChatGroq(
78
+ # api_key="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",
79
+ # model="llama3-70b-8192",
80
+ # # model = 'gemma-7b-it',
81
+ # temperature = 0
82
+ # # model = 'mixtral-8x7B-32768'
83
+ # )
84
+
85
+
86
+
87
+ # Get the data from NSE as pandas dataframe
88
+ # Function to get dataframe from NSE website
89
+ # Data from two pages: NSE Announcements and NSE corporate actions are fetched and hence two dataframes
90
+ def get_pd(d):
91
+
92
+ # Get the current date
93
+ current_date = datetime.now()
94
+
95
+ # Get the previous day
96
+ previous_day = current_date - timedelta(days=d)
97
+
98
+ # Format the dates in the required format (dd-mm-yyyy)
99
+ current_date_str = current_date.strftime("%d-%m-%Y")
100
+ previous_day_str = previous_day.strftime("%d-%m-%Y")
101
+
102
+
103
+ base_url = 'https://www.nseindia.com'
104
+ session = requests.Session()
105
+ headers = {
106
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, '
107
+ 'like Gecko) '
108
+ 'Chrome/80.0.3987.149 Safari/537.36',
109
+ 'accept-language': 'en,gu;q=0.9,hi;q=0.8',
110
+ 'accept-encoding': 'gzip, deflate, br'}
111
+
112
+ r = session.get(base_url, headers=headers, timeout=120)
113
+ cookies = dict(r.cookies)
114
+ # Use the dates in the URL
115
+ url1 = f"https://www.nseindia.com/api/corporate-announcements?index=equities&from_date={previous_day_str}&to_date={current_date_str}&csv=true"
116
+
117
+ url2 = f"https://www.nseindia.com/api/corporates-corporateActions?index=equities&csv=true"
118
+
119
+ response1 = session.get(url1, timeout=120, headers=headers, cookies=cookies)
120
+ response2 = session.get(url2, timeout=120, headers=headers, cookies=cookies)
121
+
122
+ content1 = response1.content
123
+ content2 = response2.content
124
+ df=pd.read_csv(io.StringIO(content1.decode('utf-8')))
125
+
126
+ dfca=pd.read_csv(io.StringIO(content2.decode('utf-8')))
127
+ return df, dfca
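+ # Illustrative usage (not executed here): get_pd(1) returns two dataframes - announcements filed
+ # between yesterday and today, and the current corporate-actions listing; a larger argument such as
+ # get_pd(7) would widen the announcement window to the last week.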
128
+
129
+
130
+ # Process the dataframes and store them as CSV files
+ # To speed up RAG processing, I decided to use three separate vector stores:
+ # the first with all data, the second with minimal data, and the third with corporate-action (CA) data
+ # Owing to the context-window limits of RAG, it is best to keep irrelevant data out of each store
134
+ df_old, dfca = get_pd(1)
135
+ df_back = df_old.copy()
136
+ df_back.to_csv("df_backup.csv",index=False)
137
+ df_old.drop(['RECEIPT','DISSEMINATION','DIFFERENCE'],axis=1,inplace=True)
138
+ df_old2 = df_old.drop(['ATTACHMENT'],axis=1)
139
+ # Save it as a CSV file
140
+ df_old.to_csv("nse_data_old.csv", index=False)
141
+ # df_old1.to_csv("nse_data_old1.csv", index=False)
142
+ df_old2.to_csv("nse_data_old2.csv", index=False)
143
+ dfca.to_csv("nse_ca.csv", index=False)
144
+
145
+
146
+ # Use Langchain CSV Loaders to load the CSV data
147
+ loader = CSVLoader("nse_data_old.csv")
148
+ data_old = loader.load()
149
+ loader2 = CSVLoader("nse_data_old2.csv")
150
+ data_old_2 = loader2.load()
151
+ loader3 = CSVLoader("nse_ca.csv")
152
+ data_ca = loader3.load()
153
+
154
+ global vectorstore,vectorstore2,vectorstore3, colist, colist_tracked
155
+ # Create vectorstores - I tried Chroma but FAISS turned out to be successful
156
+ vectorstore = FAISS.from_documents(data_old, embedding_function)
157
+ vectorstore2 = FAISS.from_documents(data_old_2, embedding_function)
158
+ vectorstore3 = FAISS.from_documents(data_ca, embedding_function)
159
+ vectorstore.save_local("vectorstore")
160
+ vectorstore2.save_local("vectorstore2")
161
+ vectorstore3.save_local("vectorstore3")
162
+
163
+ ###########################
164
+ # Create company list
165
+
166
+ # Load the NIFTY company names - currently hardcoded, since the NIFTY 50 list changes infrequently, but this could be made dynamic
+ co1 = pd.read_csv('ind_nifty50list.csv')
+
+ # Create the company lists required
+ # Get the column to convert to a list
+ column_name = "Company Name"
+
+ # Convert the column to a list
+ co_list1 = co1[column_name].tolist()
+
+ # These are the companies being tracked - this list can be uploaded or hardcoded
+ co_list_tracked = ['Reliance Industries Limited', 'Infosys Limited','ICICI Bank Ltd', 'Indusind Bank Ltd','Ramco Systems', \
+ 'Zydus Lifesciences Limited','Bharti Airtel Limited',\
+ 'ICICI Bank Limited','TechMahindra Limited', 'Indiabulls Real Estate Limited','Tamilnad Mercantile Bank Limited', \
+ 'Bajaj Finance Limited', 'Apollo Tyres Limited', 'Indusind Bank Limited', 'Kirloskar Oil Engines Limited']
181
+
182
+ co_list = co_list1 + co_list_tracked
183
+
184
+
185
+
186
+ ####################################
187
+
188
+ ##################################
189
+ # Let us create some functions required
190
+ ##################################
191
+
192
+ # LLM function to get announcement detail
193
+
194
+ def give_announcement(llm,stock):
195
+ if not stock:
196
+ return "This company has not made any announcements today or yesterday"
197
+
198
+ else:
199
+
200
+
201
+ retriever1 = vectorstore.as_retriever()
202
+ qa_chain = RetrievalQA.from_chain_type(llm,
203
+ retriever=retriever1,
204
+ return_source_documents=False)
205
+
206
+ response = qa_chain({"query":f"What are the announcements made by the company {stock}?. If no announcement has been made by that company, \
207
+ just say that no announcement has been made by that company."})
208
+ return f"Announcements made by {stock}: {response['result']}"
209
+
210
+ # LLM function to get Corporate Action Detail
211
+ def get_ca(llm,stock):
212
+ # stock = stock_name
213
+ if not stock:
214
+ return "This company has not made any announcements today or yesterday"
215
+
216
+ else:
217
+
218
+ # resp1 = llm.invoke(f"get all the yahoo finance company name(s) of entity name in {stock}. Just print the ticker(s) alone. Do not print leading sentences.")
219
+ # stock = resp1.content
220
+
221
+
222
+ retriever3 = vectorstore3.as_retriever()
223
+ qa_chain2 = RetrievalQA.from_chain_type(llm,
224
+ retriever=retriever3,
225
+ return_source_documents=False)
226
+
227
+ response = qa_chain2({"query":f"What are the corporate action announcements made by the company {stock}?. If no announcement has been made by that company, do not print any source documents and \
228
+ just say that no announcement has been made by that company."})
229
+ return response['result']#, response['source_documents']
230
+
231
+ # a web search tool
232
+ search=DuckDuckGoSearchRun()
233
+
234
+ # Fetch stock data from Yahoo Finance
235
+ def get_stock_price(ticker,history=5):
236
+ # time.sleep(4) #To avoid rate limit error
237
+ if "." in ticker:
238
+ ticker=ticker.split(".")[0]
239
+ ticker=ticker+".NS"
240
+ stock = yf.Ticker(ticker)
241
+ df = stock.history(period="1y")
242
+ df=df[["Close","Volume"]]
243
+ df.index=[str(x).split()[0] for x in list(df.index)]
244
+ df.index.rename("Date",inplace=True)
245
+ df=df[-history:]
246
+ # print(df.columns)
247
+ return df.to_string()
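+ # Illustrative usage: get_stock_price("RELIANCE") appends ".NS" and returns the last five rows of
+ # closing price and volume as a plain string for the agent to read, e.g. (hypothetical values)
+ # "Date        Close    Volume\n2024-06-07  2950.10  5123400".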
248
+
249
+ # get stock price movements
250
+ def get_movements(llm,stock):
251
+ if not stock:
252
+ return "This company has not made any announcements today or yesterday"
253
+ else:
254
+
255
+ stock = stock[0]
256
+
257
+ dfc = pd.read_csv('nse_data_old.csv')
258
+
259
+ stockdesc = dfc[dfc['COMPANY NAME'] == stock]['COMPANY NAME'].iloc[0]
260
+ stock1 = dfc[dfc['COMPANY NAME'] == stock]['SYMBOL'].iloc[0]
261
+ stock = get_ticker(stock1)
262
+
263
+ print("stock is ",stock)
264
+
265
+ tools=[
266
+ Tool(
267
+ name="get stock data",
268
+ func=get_stock_price,
269
+ description=f"Use this tool to get stock price data. This tool will return three values: date, volume and closing price of the stock \
270
+ for the period of 5 days. stock = {stock}"
271
+ ),
272
+
273
+ Tool(
274
+ name="DuckDuckGo Search",
275
+ func=search.run,
276
+ description=f"Use this tool for for web search for searching details about stock like broker sentiment. You can also get recent stock \
277
+ related news. stock symbol = {stock} and stockname = {stockdesc}"
278
+ ),
279
+
280
+ ]
281
+
282
+ prompt = ChatPromptTemplate.from_messages(
283
+ [
284
+ (
285
+ "system",
286
+ "You are a helpful stock market analysis assistant. Make sure to use the tools given for information.",
287
+ ),
288
+ ("placeholder", "{chat_history}"),
289
+ ("human", "{input}"),
290
+ ("placeholder", "{agent_scratchpad}"),
291
+ ]
292
+ )
293
+
294
+ # Construct the Tools agent
295
+ agent = create_tool_calling_agent(llm, tools, prompt)
296
+
297
+ # Create an agent executor by passing in the agent and tools
298
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
299
+ response = agent_executor.invoke({"input": f"How much the stock price of stock {stock} with name {stockdesc} moved in the last few days?. Give the prices \
300
+ over the last few days and also percentage change. For example, If the stock has not moved in single direction, \
301
+ you can say the stock has been volatile. But if it has moved up over five days, you can say so with percentage movement"})
302
+ return f"Answer for {stock} - {response['output']}"
303
+
304
+
305
+ #####################################
306
+ # get stock sentiments
307
+ #####################################
308
+
309
+ prompt1 = """Hello, I need broker sentiment data for a specific stock. Please search and summarize current market analyses, broker reports, \
310
+ and overall sentiment regarding the given stock:\Focus on information from credible sources like financial news, broker reports, and investment research firms. \
311
+ Provide key insights, including:\
312
+ Recent broker recommendations (buy, hold, sell), \
313
+ Notable broker analyses or reports, \
314
+ General trends in broker sentiment, \
315
+ Any major news or events impacting the stock's sentiment. \
316
+ Please ensure the data is up-to-date and from reputable sources. Provide a concise summary with relevant details and any supporting context to understand the current sentiment.\
317
+ Please note that you are not chat agent, but meant for single usage, so do not conclude with any greetings or asking for further assistance etc!.\
318
+ """
319
+
320
+ def get_sentiments(llm,stock):
321
+ if not stock:
322
+ return "This company has not made any announcements today or yesterday"
323
+ else:
324
+ print("st1",stock)
325
+ stock = stock[0]
326
+ print("af ",stock)
327
+ #####
328
+ dfc = pd.read_csv('nse_data_old.csv')
329
+
330
+ stockdesc = dfc[dfc['COMPANY NAME'] == stock]['COMPANY NAME'].iloc[0]
331
+ stock1 = dfc[dfc['COMPANY NAME'] == stock]['SYMBOL'].iloc[0]
332
+ stock = get_ticker(stock1)
333
+ tools=[
334
+ Tool(
335
+ name="get stock data",
336
+ func=get_stock_price,
337
+ description=f"Use this tool to get stock price data. This tool will return three values: date, volume and closing price of the stock \
338
+ for the period of 5 days. stock = {stock}"
339
+ ),
340
+
341
+ Tool(
342
+ name="DuckDuckGo Search",
343
+ func=search.run,
344
+ description=f"Use this tool for for web search for searching details about stock like broker sentiment. You can also get recent stock \
345
+ related news. stock name = {stockdesc}"
346
+ ),
347
+
348
+ ]
349
+
350
+ prompt = ChatPromptTemplate.from_messages(
351
+ [
352
+ (
353
+ "system",
354
+ f"{prompt1}",
355
+ ),
356
+ ("placeholder", "{chat_history}"),
357
+ ("human", "{input}"),
358
+ ("placeholder", "{agent_scratchpad}"),
359
+ ]
360
+ )
361
+
362
+ # Construct the tools agent and its executor inside the try block below, so that
+ # any failure (e.g., a model that does not support tool calling) is caught and reported.
367
+ try:
368
+ agent = create_tool_calling_agent(llm, tools, prompt)
369
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
370
+ response = agent_executor.invoke({"input": f"Get broker sentiment for the stock {stock} and stock name {stockdesc}"})
371
+ return f"Broker sentiment analysis for {stock}. - {response['output']}"
372
+ except Exception as e:
373
+ return f"An error occurred: {str(e)}"
374
+ #################
375
+
376
+ # Fetch financial statements from Yahoo Finance
377
+ def get_balancesheet(ticker):
378
+ # time.sleep(4) #To avoid rate limit error
379
+ if "." in ticker:
380
+ ticker=ticker.split(".")[0]
381
+ else:
382
+ ticker=ticker
383
+ ticker=ticker+".NS"
384
+ company = yf.Ticker(ticker)
385
+ df = company.balance_sheet
386
+ # df = df.head(30)
387
+ df.fillna(method='ffill',inplace=True)
388
+ df.dropna(inplace=True)
389
+ return df
390
+
391
+
392
+ def get_incomestatement(ticker):
393
+ # time.sleep(4) #To avoid rate limit error
394
+ if "." in ticker:
395
+ ticker=ticker.split(".")[0]
396
+ else:
397
+ ticker=ticker
398
+ ticker=ticker+".NS"
399
+ company = yf.Ticker(ticker)
400
+ df = company.financials
401
+ # df = df.head(30)
402
+ df.fillna(method='ffill',inplace=True)
403
+ df.dropna(inplace=True)
404
+ return df
405
+
406
+ def get_ticker(company_name):
407
+ com=company_name+".NS"
408
+ ticker = yf.Ticker(com)
409
+ return ticker.info['symbol']
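+ # Illustrative usage: get_ticker("RELIANCE") queries Yahoo Finance for "RELIANCE.NS" and returns
+ # ticker.info['symbol'] (expected to be "RELIANCE.NS"); this assumes yfinance can resolve the
+ # NSE-suffixed symbol, otherwise a KeyError would surface to the caller.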
410
+
411
+ def get_financialratio(model, input,stock):
412
+
413
+ stock_name = get_companynames(stock)
414
+
415
+ llm = get_model(model)
416
+
417
+ if not stock_name:
418
+ return "This company has not made any announcements"
419
+ else:
420
+
421
+ stockname = stock_name[0]
422
+
423
+ print("stock1 ",stockname)
424
+
425
+ dfc = pd.read_csv('nse_data_old.csv')
426
+
427
+ stock1 = dfc[dfc['COMPANY NAME'] == stockname]['SYMBOL'].iloc[0]
428
+
429
+ print("staock1 ",stock1)
430
+
431
+ stock = get_ticker(stock1)
432
+
433
+ print("stock is ",stock)
434
+
435
+ if input == '':
436
+ return "No query has been entered!"
437
+
438
+ else:
439
+ resp = llm.invoke(f"You have to answer either 'A' or 'B' without any leading sentences - check whether the input {input} pertains \
440
+ to financial ratio query. If it pertains to financial ratio query, \
441
+ respond with letter 'A', else with letter 'B' if it contains only something like company name")
442
+ print("nature of query ",resp)
443
+ if resp.content == 'B':
444
+ return "Enter a query pertaining to financial ratios!"
445
+ else:
446
+
447
+ # resp1 = llm.invoke(f"get yahoo finance ticker name of entity name in {input}. Just print the ticker alone. Do not print leading sentences.")
448
+ # stock = resp1.content
449
+
450
+ # resp2 = llm.invoke(f"to answer the query {input}, whether balance sheet or income statement required? If balance sheet, answer A, else B")
451
+ resp2 = llm.invoke(f"Answer A, if balance sheet or B, if income statement. To answer the query {input}, \
452
+ whether balance sheet or income statement required - If balance sheet, answer A, else B")
453
+
454
+
455
+ if resp2.content=='A':
456
+ df1 = get_balancesheet(f'{stock}')
457
+ print("balance sheet")
458
+ else:
459
+ df1 = get_incomestatement(f'{stock}')
460
+ print("income statement")
461
+
462
+ df=df1.T
463
+
464
+ print("the df is ",df)
465
+
466
+ cols= df.columns.tolist()
467
+
468
+ resp3 = llm.invoke(f"List the column names, as python list, in {cols} needed for {input} calculation. Do not output any sentence other than column names.\
469
+ For example, do not output leading answer statements like: Here are the column names needed for ..")
470
+ message=resp3.content
471
+
472
+ def extract_df(df, message):
473
+ c = ast.literal_eval(message)
474
+ return df[c]
475
+
476
+ df_new=extract_df(df,message)
477
+
478
+ # prompt1 = f"List the column names, as python list, in {cols} needed for {data} calculation. Do not output any sentence other than column names.\
479
+ # For example, do not output leading answer statements like: Here are the column names needed for .."
480
+
481
+ # prompt = f"What is the current ratio of {stock}?. Use {df_new}. Give only year and current ratio for that year in JSON format"
482
+
483
+ parser = JsonOutputParser()
484
+
485
+
486
+ prompt = PromptTemplate(
487
+ template="Answer the user query.\n{format_instructions}\n{query}\n",
488
+ input_variables=["query"],
489
+ partial_variables={"format_instructions": parser.get_format_instructions()},
490
+ )
491
+
492
+ # prompt = ChatPromptTemplate.from_messages(
493
+ # [
494
+ # (
495
+ # "system",
496
+ # "You are a helpful financial data analysis assistant.",
497
+ # ),
498
+ # ("placeholder", "{chat_history}"),
499
+ # ("human", f"Answer the user using df_new and input: question:{input}, dataframe: {df_new}, \
500
+ # format_instructions: parser.get_format_instructions()"\
501
+ # ),
502
+ # ("placeholder", "{agent_scratchpad}"),
503
+ # ]
504
+ # )
505
+
506
+ chain = prompt | llm | parser
507
+
508
+ try:
509
+ response= chain.invoke( f"Using {df_new}, {input}?")
510
+ # Print only the results. Print the output in Json format.")
511
+ return f"For the company: {stockname}, Here are the details: {response}"
512
+ except Exception as e:
513
+ return f"An error occurred: {str(e)}"
514
+
515
+ ##########################
516
+ # Functions to plot a chart over ratios - this has scope for major enhancements!
517
+ def plot_chart(data):
518
+ # Load the JSON string into a Python object
519
+ # data = json.loads(json_str)
520
+
521
+ # Get the first key in the dictionary
522
+ try:
523
+ key = list(data.keys())[0]
524
+ # Create a plot
525
+ plt.figure(figsize=(8, 6))
526
+ plt.bar(data[key].keys(), data[key].values())
527
+ plt.title(f"{key} Over Years")
528
+ plt.xlabel("Year")
529
+ plt.ylabel(key)
530
+ plt.tight_layout()
531
+
532
+ # Return the plot
533
+ return plt
534
+
535
+ except Exception as e:
536
+ return None
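+ # plot_chart expects a dict keyed by ratio name, e.g. (hypothetical values)
+ # {"Current Ratio": {"2022": 1.1, "2023": 1.3, "2024": 1.2}}; it bars the inner year -> value
+ # mapping, and any other shape falls into the except branch and returns None so the Gradio plot stays empty.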
537
+
538
+ # def get_chart(input):
539
+ # response = get_financialratio(model,input)
540
+ # plt = plot_chart(response)
541
+ # return plt
542
+
543
+ def get_chart(model,input,stock):
544
+ stock_name = get_companynames(stock)
545
+ if stock_name:
546
+
547
+ response = get_financialratio(model,input,stock)
548
+ # Extract the dictionary part using regex
549
+ dict_match = re.search(r"\{.*\}", response) # Search for content within curly braces
550
+
551
+ # Convert the extracted string to a dictionary
552
+ if dict_match:
553
+ extracted_dict_str = dict_match.group(0) # Get the matching text
554
+ extracted_dict = ast.literal_eval(extracted_dict_str) # Convert string to dictionary
555
+ else:
556
+ extracted_dict = None # No dictionary found
557
+ print("extrated tic ", extracted_dict)
558
+ plt = plot_chart(extracted_dict)
559
+ return plt
560
+ else: return None
561
+
562
+
563
+ def combined_ratio(model, input,stock):
564
+ return get_financialratio(model,input,stock), get_chart(model, input,stock)
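+ # combined_ratio drives the "Analyse" button: get_financialratio makes one LLM call to check the query is
+ # about ratios, another to pick balance sheet vs income statement, a third to select the needed columns,
+ # and a JSON-parsed final answer; get_chart then re-extracts that dict and plots it as a year-wise bar chart.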
565
+
566
+ ###############################
567
+
568
+
569
+ ###############################
570
+
571
+ # Create the Gradio Blocks interface with a title and description
572
+
573
+ ##################################
574
+ global flag
575
+ def incremental_process():
576
+ global vectorstore,vectorstore2,vectorstore3, flag
577
+
578
+ try:
+ df_new, _ = get_pd(1)
+ flag = 0
+ except Exception:
+ # fall back to the backed-up data if the NSE fetch fails or times out
+ df_new = pd.read_csv("df_backup.csv")
+ flag = 1
584
+
585
+ df_new.to_csv("df_new.csv",index=False)
586
+ print("length of df_new ",len(df_new))
587
+ print("length of df_old ", len(df_old))
588
+
589
+ #drop unnecessary common columns
590
+ df_new.drop(['RECEIPT','DISSEMINATION','DIFFERENCE'],axis=1,inplace=True)
591
+
592
+ # #find the difference and add incrementally for first store
593
+ df_merged = df_new.merge(df_old, how='left', indicator=True)
594
+ # Filter rows that are unique to 'n' (i.e., where '_merge' is 'left_only')
595
+ df_add1= df_merged[df_merged['_merge'] == 'left_only'].drop(columns=['_merge'])
596
+
597
+ # Save it as a CSV file
598
+ df_add1.to_csv("nse_data_add1.csv", index=False)
599
+
600
+ #drop unnecessary columns for second vector store
601
+ df_new2 = df_new.drop(['ATTACHMENT'],axis=1)
602
+
603
+ # add increment for second store
604
+ df_merged = df_new2.merge(df_old2, how='left', indicator=True)
605
+ df_add2 = df_merged[df_merged['_merge'] == 'left_only'].drop(columns=['_merge'])
606
+ # Save it as a CSV file
607
+ df_add2.to_csv("nse_data_add2.csv", index=False)
608
+
609
+ #####################
610
+
611
+ # Load the first CSV file
612
+ dfold = pd.read_csv('nse_data_old.csv')
613
+
614
+ # Load the second CSV file
615
+ dfadd = pd.read_csv('nse_data_add1.csv')
616
+
617
+ # print("df old",dfold)
618
+ # print("######")
619
+ # print("df add ",dfadd)
620
+
621
+ if dfadd.empty:
622
+ dfco = dfold.copy()
623
+ else:
624
+ # Append df2 at the end of df1
625
+ dfco = pd.concat([dfold, dfadd], ignore_index=True)
626
+
627
+ dfco.to_csv("dfco.csv",index=False)
628
+
629
+ # Here incremental RAG is achieved by adding additional data dynamically to vectorstore
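+ # (FAISS.merge_from appends the incremental vectors and docstore entries to the index loaded from disk,
+ # so only the newly added filing rows need to be embedded; the merged index is saved and reloaded below.)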
630
+ loader = CSVLoader("nse_data_add1.csv")
631
+ data_new1 = loader.load()
632
+
633
+ loader = CSVLoader("nse_data_add2.csv")
634
+ data_new2 = loader.load()
635
+
636
+ print("original size ",vectorstore.index.ntotal)
637
+
638
+ len1 = len(pd.read_csv('nse_data_old.csv')) + len(pd.read_csv('nse_data_add1.csv'))
639
+ print("len1 old + new csv ",len1)
640
+
641
+ len2 = vectorstore.index.ntotal
642
+
643
+ if len1!=len2:
644
+
645
+ print("old size ",vectorstore.index.ntotal)
646
+
647
+ # for first store
648
+ vectorstore_add1 = FAISS.from_documents(data_new1, embedding_function)
649
+ print("incremental size ",vectorstore_add1.index.ntotal)
650
+ vectorstore_new1 = FAISS.load_local("vectorstore",embedding_function,allow_dangerous_deserialization=True)
651
+ vectorstore_new1.merge_from(vectorstore_add1)
652
+ vectorstore_new1.save_local("vectorstore")
653
+ print("new size ",vectorstore_new1.index.ntotal)
654
+ print("new old size ",vectorstore.index.ntotal)
655
+ # retrieverx = vectorstore_new.as_retriever()
656
+
657
+ # for second store
658
+ vectorstore_add2 = FAISS.from_documents(data_new2, embedding_function)
659
+ print("incremental size ",vectorstore_add2.index.ntotal)
660
+ vectorstore_new2 = FAISS.load_local("vectorstore2",embedding_function,allow_dangerous_deserialization=True)
661
+ vectorstore_new2.merge_from(vectorstore_add2)
662
+ vectorstore_new2.save_local("vectorstore2")
663
+ print("new size ",vectorstore_new2.index.ntotal)
664
+ print("new old size ",vectorstore2.index.ntotal)
665
+ # retrieverx = vectorstore_new2.as_retriever()
666
+
667
+ ##########################
668
+ # Define updated vector stores, retrievers and QA chains
669
+ ##########################
670
+
671
+ vectorstore = FAISS.load_local("vectorstore",embedding_function,allow_dangerous_deserialization=True)
672
+ print("final size store 1",vectorstore.index.ntotal)
673
+
674
+ vectorstore2 = FAISS.load_local("vectorstore2",embedding_function,allow_dangerous_deserialization=True)
675
+ print("final size store 2",vectorstore2.index.ntotal)
676
+
677
+ vectorstore3 = FAISS.load_local("vectorstore3",embedding_function,allow_dangerous_deserialization=True)
678
+ print("final size store 3",vectorstore3.index.ntotal)
679
+
680
+ return flag
681
+
682
+
683
+ def get_colist2():
684
+ dfco = pd.read_csv('dfco.csv')
685
+ dfco1 = dfco[['COMPANY NAME']]
686
+ dfco2 = dfco1.drop_duplicates()
687
+ # Save the result to a new CSV file
688
+ dfco2.to_csv('companies.csv', index=False)
689
+
690
+ dfco3 = dfco2.head(10)
691
+
692
+ co_list3 = dfco3['COMPANY NAME'].unique().tolist()
693
+
694
+ filtered_df = dfco2[dfco2['COMPANY NAME'].isin(co_list)]
695
+
696
+ co_list2 = filtered_df['COMPANY NAME'].tolist()
697
+ return co_list2, co_list3
698
+
699
+ def get_timestampmessage(flag):
700
+ dfco = pd.read_csv('dfco.csv')
701
+ timestamp = dfco[['BROADCAST DATE/TIME']].max().values.tolist()[0]
702
+ if flag == 1:
+ message = f"There was an NSE timeout error. The latest filing information available is up to {timestamp}"
+ else: message = f"Latest filing information is available up to {timestamp}"
705
+ return message
706
+
707
+
708
+ def update():
709
+ global flag
710
+ flag = incremental_process()
711
+ message = get_timestampmessage(flag)
712
+ return message
713
+
714
+ def give_time():
715
+ dfco = pd.read_csv("dfco.csv")
716
+ timestamp = dfco[['BROADCAST DATE/TIME']].max().values.tolist()[0]
717
+ return timestamp
718
+
719
+
720
+
721
+ # Define the IST timezone
722
+ ist_timezone = pytz.timezone("Asia/Kolkata")
723
+ # Define UTC for server-side time
724
+ utc_timezone = pytz.utc
725
+
726
+ def refresh():
727
+ # Get the client-side timestamp (assuming it is in IST)
728
+ timestamp_str = give_time() # The format returned should match the expected format
729
+ given_time = datetime.strptime(timestamp_str, "%d-%b-%Y %H:%M:%S")
730
+ given_time_ist = ist_timezone.localize(given_time) # Localize to IST
731
+
732
+ # Get the current server time in UTC
733
+ current_time_utc = datetime.now(tz=utc_timezone)
734
+
735
+ # Convert the client-side time to UTC for consistent comparison
736
+ given_time_utc = given_time_ist.astimezone(utc_timezone)
737
+
738
+ # Calculate the time difference
739
+ time_difference = current_time_utc - given_time_utc
740
+
741
+ print("the time diff is ", time_difference)
742
+
743
+ # Check if the time difference is greater than one hour
744
+ if time_difference > timedelta(hours=1):
745
+ message1 = update()
746
+ print("Incremental update run")
747
+ else:
748
+ message1 = f"Refresh allowed only if data is stale for more than one hour. Current client timestamp: {timestamp_str}"
749
+
750
+ return message1
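+ # Example of the staleness check above (hypothetical timestamp): a latest-filing time of
+ # "07-Jun-2024 14:05:00" is localized to IST, converted to UTC and compared with the server's UTC clock;
+ # only a gap of more than one hour triggers the incremental update.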
751
+ ##########################################################################
752
+
753
+
754
+ def plot1_top_20():
755
+ df = pd.read_csv('nse_data_old.csv')
756
+ subjects = ['Acquisition',
757
+ 'Alteration Of Capital and Fund Raising-XBRL',
758
+ 'Analysts/Institutional Investor Meet/Con. Call Updates',
759
+ 'Board Meeting Intimation',
760
+ 'Book Closure',
761
+ 'Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent',
762
+ 'Change in Management',
763
+ 'Credit Rating',
764
+ 'Disclosure of material issue',
765
+ 'Dividend',
766
+ 'Financial Result Updates',
767
+ 'Investor Presentation',
768
+ 'Notice Of Shareholders Meetings-XBRL',
769
+ 'Related Party Transactions',
770
+ 'Resignation',
771
+ 'Rights Issue',
772
+ 'Shareholders meeting',
773
+ 'Spurt in Volume',
774
+ 'Update-Acquisition/Scheme/Sale/Disposal-XBRL',
775
+ ]
776
+
777
+ # companies = co_list2
778
+ # df = df[df['COMPANY NAME'].isin(co_list2)]
779
+ df = df[df['SUBJECT'].isin(subjects)]
780
+ # df['SUBJECT'] = df['SUBJECT'].replace('Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent', 'Change in Key Managerial Personnel')
781
+
782
+ df['SUBJECT'] = df['SUBJECT'].replace('Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent', 'Change in Key Managerial Personnel')
783
+ value_counts = df['SUBJECT'].value_counts()
784
+
785
+ # Get the top 10 labels by count
786
+ # top_20_value_counts = value_counts[:20]
787
+
788
+ plt.figure(figsize=(10, 6))
789
+ plt.barh(value_counts.index, value_counts.values)
790
+ plt.xlabel('Count')
791
+ plt.ylabel('Announcements')
792
+ plt.title('NSE Corporate Announcements - A Glance')
793
+ plt.tight_layout()
794
+ # plt.close()
795
+ return plt
796
+
797
+ ## Function to create company list specific chart
798
+ def plot2_top_20():
799
+ co_list2,_ = get_colist2()
800
+
801
+ # global co_list2
802
+ # Get the counts of each label
803
+ df = pd.read_csv('nse_data_old.csv')
804
+ subjects = ['Acquisition',
805
+ 'Alteration Of Capital and Fund Raising-XBRL',
806
+ 'Analysts/Institutional Investor Meet/Con. Call Updates',
807
+ 'Board Meeting Intimation',
808
+ 'Book Closure',
809
+ 'Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent',
810
+ 'Change in Management',
811
+ 'Credit Rating',
812
+ 'Disclosure of material issue',
813
+ 'Dividend',
814
+ 'Financial Result Updates',
815
+ 'Investor Presentation',
816
+ 'Notice Of Shareholders Meetings-XBRL',
817
+ 'Related Party Transactions',
818
+ 'Resignation',
819
+ 'Rights Issue',
820
+ 'Shareholders meeting',
821
+ 'Spurt in Volume',
822
+ 'Update-Acquisition/Scheme/Sale/Disposal-XBRL',
823
+ ]
824
+
825
+ # companies = co_list2
826
+
827
+ df = df[df['COMPANY NAME'].isin(co_list2)]
828
+ # df = df[df['COMPANY NAME'].isin(co_list_tracked)]
829
+ # df = df[df['SUBJECT'].isin(subjects)]
830
+ # df['SUBJECT'] = df['SUBJECT'].replace('Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent', 'Change in Key Managerial Personnel')
831
+
832
+ df['SUBJECT'] = df['SUBJECT'].replace('Change in Directors/ Key Managerial Personnel/ Auditor/ Compliance Officer/ Share Transfer Agent', 'Change in Key Managerial Personnel')
833
+ value_counts = df['SUBJECT'].value_counts()
834
+
835
+ # Get the top 10 labels by count
836
+ # top_20_value_counts = value_counts[:20]
837
+
838
+ plt.figure(figsize=(10, 6))
839
+ plt.barh(value_counts.index, value_counts.values)
840
+ plt.xlabel('Count')
841
+ plt.ylabel('Announcements')
842
+ plt.title('NSE Corporate Announcements - Tracked Companies')
843
+ plt.tight_layout()
844
+ # plt.close()
845
+ return plt
846
+
847
+ def get_companynames(stock):
848
+ df = pd.read_csv('nse_data_old.csv')
849
+ if stock:
850
+
851
+ # Create a regular expression pattern
852
+ pattern = f'.*{stock}.*'
853
+
854
+ # Get rows where 'COMPANY NAME' contains the keyword (case-insensitive)
855
+ matched_rows = df[df['COMPANY NAME'].str.contains(pattern, case=False)]
856
+
857
+ # Get unique company names
858
+ unique_companies = matched_rows['COMPANY NAME'].unique()
859
+
860
+ return list(set(unique_companies))
861
+ else: return None
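+ # Illustrative usage: get_companynames("zydus") does a case-insensitive substring match on COMPANY NAME
+ # and would return ['Zydus Lifesciences Limited'] if that company has filed recently; an empty input
+ # returns None, which the callers treat as "no announcements".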
862
+
863
+ # A combined function to be used in Gradio output box
864
+ def print_model(llm):
865
+ co_list2,_ = get_colist2()
866
+ if co_list2:
867
+ return f"You are using {llm.model_name} model for this session. \n \n" \
868
+ f"These are the companies you track: {co_list_tracked}. \n \n" \
869
+ f"These are the companies, including those in NIFTY, that have filed any information with NSE either today / yesterday - {co_list2}"
870
+ else:
871
+ return f"You are using {llm.model_name} model for this session. \n \n" \
872
+ f"Your are tracking these companies: {co_list_tracked}, \n \n"\
873
+ f"None of the tracked companies or NIFTY 50 have filed any information with NSE on either today or yesterday"
874
+
875
+
876
+ def print_model1(llm):
877
+ return f"You are using {llm.model_name} model for this session. \n \n [Note: There is NSE timeout error preventing fetching of latest data. So, results may not be real-time / up-to-date]"
878
+
879
+
880
+ def combined_function1(model,stock):
881
+ global flag
882
+ llm = get_model(model)
883
+ stock = get_companynames(stock)
884
+ if flag == 0:
885
+ return print_model(llm), give_announcement(llm,stock),get_ca(llm,stock),get_movements(llm,stock), get_sentiments(llm,stock)
886
+ else:
887
+ return print_model1(llm), give_announcement(llm,stock),get_ca(llm,stock),get_movements(llm,stock), get_sentiments(llm,stock)
888
+
889
+ def get_model(model_name):
890
+ llm = ChatGroq(
891
+ api_key="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",
892
+ model=model_name,
893
+ max_tokens = 8192,
894
+ # model = 'gemma-7b-it',
895
+ temperature = 0
896
+ # model = 'mixtral-8x7B-32768'
897
+ )
898
+ return llm
899
+
900
+
901
+ # This function is given here as company list is dynamic
902
+ def give_names():
903
+ global co_list_tracked
904
+ co_list2, co_list3 = get_colist2()
905
+ return f"Apart from NIFTY, these are the companies you track: \n \n" \
906
+ f" {co_list_tracked}. \n \n" \
907
+ f"These are the tracked companies that have made announcements: \n \n" \
908
+ f"{co_list2}. \n \n" \
909
+ f"These are latest 10 companies that have made announcements: \n \n " \
910
+ f"{co_list3}"
911
+
912
+
913
+ ##############################
914
+ retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
915
+ ###############################
916
+
917
+ # This function is for chat queries. Given here due to retriever defined here
918
+ def chat_chain(model,query):
919
+ llm = get_model(model)
920
+ if query=='':
921
+ return "Please enter a query!"
922
+ else:
923
+ combine_docs_chain = create_stuff_documents_chain(
924
+ llm, retrieval_qa_chat_prompt)
925
+ retriever2 = vectorstore2.as_retriever()
926
+ retrieval_chain = create_retrieval_chain(retriever2, combine_docs_chain)
927
+ response = retrieval_chain.invoke({"input": query})
928
+ return response['answer']
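+ # chat_chain is a plain retrieval-QA pass: the query is embedded, the closest filing rows are retrieved
+ # from vectorstore2 (the trimmed store without attachment links), stuffed into the pulled
+ # "langchain-ai/retrieval-qa-chat" prompt, and answered by the selected Groq model.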
929
+
930
+
931
+ #################################
932
+ ## Update the vectorstate with latest data
933
+ flag = incremental_process()
934
+ ###########################################################################
935
+
936
+ with gr.Blocks() as demo:
937
+
938
+ # Add a Markdown block for the description
939
+ gr.Markdown("""<h1 style='color: blue;'>Chat and Analyze with NSE Filings Information</h1>""")
940
+ gr.Markdown("""Powered by Gradio, Groq, Llama3, FAISS, Langchain, YahooFinance""")
941
+ gr.Markdown(
942
+ """
943
+ <img src="https://upload.wikimedia.org/wikipedia/commons/1/12/NSE_Exchange_Plaza.jpg" width=500px>
944
+ Enter any company name to see its recent filings with NSE in real time. This app can also track a list of companies for corporate announcements \
+ filed with NSE (currently hard-coded to the NIFTY 50). To check whether any of the tracked companies has made an announcement yesterday or today, \
+ enter the company name and submit. The first output box lists all the tracked companies that have made an announcement today, \
+ and the second box provides details about the announcement. You can also do ratio analysis and chat with the filings information (beta).
948
+ """
949
+ )
950
+
951
+ txt_output = gr.Text(give_time(),label = "Opening Data - Timestamp of latest Filing")
952
+ txt_output = gr.Text(give_names(),label = "Announcements for tracked companies")
953
+
954
+ # This is for defaulting charts when app is launched
955
+ plot_output1 = gr.Plot(plot1_top_20(), label="Chart") # Call the function to create the plot
956
+ plt.close()
957
+ plot_output2 = gr.Plot(plot2_top_20(), label="Chart") # Call the function to create the plot
958
+ plt.close()
959
+ gr.Markdown("""<h2 style='color: blue;'>Fetch Announcements/Corporate Actions/Price Movements/Broker Sentiments</h2>""")
960
+ # Use a Column to structure the inputs and outputs
961
+ with gr.Column():
962
+ outputs5 = [gr.Textbox(label="Latest Filing Timestamp",placeholder="Refresh data if stale for more than an hour")]
963
+ button5 = gr.Button("Refresh Data")
964
+ # button5.click(lambda: refresh(dfco), inputs=None, outputs=outputs5)
965
+ button5.click(lambda: refresh(), inputs=None, outputs=outputs5)
966
+
967
+ # Create a dropdown box for selecting the operation
968
+ operation_dropdown = gr.Dropdown(
969
+ label="Select a model",
970
+ choices=['llama3-70b-8192','llama3-8b-8192', 'gemma-7b-it','mixtral-8x7B-32768' ], # Options for the dropdown
971
+ value='llama3-70b-8192', # Default value
972
+ )
973
+ # First text input and button
974
+ text_input1 = gr.Textbox(
975
+ label="Enter Company Name",
976
+ placeholder="Enter a company name; e.g., Zydus Lifesciences Limited",
977
+ lines=1
978
+ )
979
+ button1 = gr.Button("Start Analysis")
980
+ outputs1 = [
981
+ gr.Textbox(label="Selected Model",show_copy_button=True),
982
+ gr.Textbox(label="Announcement Detail", max_lines=100,show_copy_button=True),
983
+ gr.Textbox(label="Any Corporate Actions during last week?", max_lines=100,show_copy_button=True),
984
+ gr.Textbox(label="Stock Price Movement", max_lines=100,show_copy_button=True),
985
+ gr.Textbox(label="Broker Sentiment", max_lines=100,show_copy_button=True),
986
+ ]
987
+
988
+ button1.click(lambda x,y: combined_function1(x,y), inputs=[operation_dropdown,text_input1], outputs=outputs1)
989
+ gr.Markdown("""<h1 style='color: green;'>Analyse the Financial Statements of the above Company</h1>""")
990
+
991
+ text_input3 = gr.Textbox(
992
+ label="Enter Query",
993
+ placeholder="Enter your query: e.g., What is the current ratio of the stock over three years?",
994
+ lines=1)
995
+
996
+ button3 = gr.Button("Analyse")
997
+ outputs3 = [
998
+ gr.Textbox(label="Chat Response", max_lines=100,show_copy_button=True),
999
+ gr.Plot(label = "Chart")]
1000
+
1001
+
1002
+ button3.click(combined_ratio, inputs=[operation_dropdown,text_input3,text_input1], outputs=outputs3)
1003
+
1004
+ gr.Markdown("""<h1 style='color: orange;'>Chat With the NSE Filings Information</h1>""")
1005
+
1006
+ # Second text input and button
1007
+ text_input2 = gr.Textbox(
1008
+ label="Enter Chat Query",
1009
+ placeholder="Enter your query: e.g., List the companies that have recently made acquisitions",
1010
+ lines=2
1011
+ )
1012
+ button2 = gr.Button("Chat")
1013
+ outputs2 = [gr.Textbox(label="Chat Response", max_lines=100,lines=10,show_copy_button=True)]
1014
+ # gr.Plot(label = "Categories")]
1015
+ button2.click(chat_chain, inputs=[operation_dropdown,text_input2], outputs=outputs2)
1016
+
1017
+ # Launch the Gradio app
1018
+ demo.launch()
1019
+
ind_nifty50list.csv ADDED
@@ -0,0 +1,51 @@
1
+ Company Name,Industry,Symbol,Series,ISIN Code
2
+ Adani Enterprises Ltd.,Metals & Mining,ADANIENT,EQ,INE423A01024
3
+ Adani Ports and Special Economic Zone Ltd.,Services,ADANIPORTS,EQ,INE742F01042
4
+ Apollo Hospitals Enterprise Ltd.,Healthcare,APOLLOHOSP,EQ,INE437A01024
5
+ Asian Paints Ltd.,Consumer Durables,ASIANPAINT,EQ,INE021A01026
6
+ Axis Bank Ltd.,Financial Services,AXISBANK,EQ,INE238A01034
7
+ Bajaj Auto Ltd.,Automobile and Auto Components,BAJAJ-AUTO,EQ,INE917I01010
8
+ Bajaj Finance Ltd.,Financial Services,BAJFINANCE,EQ,INE296A01024
9
+ Bajaj Finserv Ltd.,Financial Services,BAJAJFINSV,EQ,INE918I01026
10
+ Bharat Petroleum Corporation Ltd.,Oil Gas & Consumable Fuels,BPCL,EQ,INE029A01011
11
+ Bharti Airtel Ltd.,Telecommunication,BHARTIARTL,EQ,INE397D01024
12
+ Britannia Industries Ltd.,Fast Moving Consumer Goods,BRITANNIA,EQ,INE216A01030
13
+ Cipla Ltd.,Healthcare,CIPLA,EQ,INE059A01026
14
+ Coal India Ltd.,Oil Gas & Consumable Fuels,COALINDIA,EQ,INE522F01014
15
+ Divi's Laboratories Ltd.,Healthcare,DIVISLAB,EQ,INE361B01024
16
+ Dr. Reddy's Laboratories Ltd.,Healthcare,DRREDDY,EQ,INE089A01023
17
+ Eicher Motors Ltd.,Automobile and Auto Components,EICHERMOT,EQ,INE066A01021
18
+ Grasim Industries Ltd.,Construction Materials,GRASIM,EQ,INE047A01021
19
+ HCL Technologies Ltd.,Information Technology,HCLTECH,EQ,INE860A01027
20
+ HDFC Bank Ltd.,Financial Services,HDFCBANK,EQ,INE040A01034
21
+ HDFC Life Insurance Company Ltd.,Financial Services,HDFCLIFE,EQ,INE795G01014
22
+ Hero MotoCorp Ltd.,Automobile and Auto Components,HEROMOTOCO,EQ,INE158A01026
23
+ Hindalco Industries Ltd.,Metals & Mining,HINDALCO,EQ,INE038A01020
24
+ Hindustan Unilever Ltd.,Fast Moving Consumer Goods,HINDUNILVR,EQ,INE030A01027
25
+ ICICI Bank Ltd.,Financial Services,ICICIBANK,EQ,INE090A01021
26
+ ITC Ltd.,Fast Moving Consumer Goods,ITC,EQ,INE154A01025
27
+ IndusInd Bank Ltd.,Financial Services,INDUSINDBK,EQ,INE095A01012
28
+ Infosys Ltd.,Information Technology,INFY,EQ,INE009A01021
29
+ JSW Steel Ltd.,Metals & Mining,JSWSTEEL,EQ,INE019A01038
30
+ Kotak Mahindra Bank Ltd.,Financial Services,KOTAKBANK,EQ,INE237A01028
31
+ LTIMindtree Ltd.,Information Technology,LTIM,EQ,INE214T01019
32
+ Larsen & Toubro Ltd.,Construction,LT,EQ,INE018A01030
33
+ Mahindra & Mahindra Ltd.,Automobile and Auto Components,M&M,EQ,INE101A01026
34
+ Maruti Suzuki India Ltd.,Automobile and Auto Components,MARUTI,EQ,INE585B01010
35
+ NTPC Ltd.,Power,NTPC,EQ,INE733E01010
36
+ Nestle India Ltd.,Fast Moving Consumer Goods,NESTLEIND,EQ,INE239A01024
37
+ Oil & Natural Gas Corporation Ltd.,Oil Gas & Consumable Fuels,ONGC,EQ,INE213A01029
38
+ Power Grid Corporation of India Ltd.,Power,POWERGRID,EQ,INE752E01010
39
+ Reliance Industries Ltd.,Oil Gas & Consumable Fuels,RELIANCE,EQ,INE002A01018
40
+ SBI Life Insurance Company Ltd.,Financial Services,SBILIFE,EQ,INE123W01016
41
+ Shriram Finance Ltd.,Financial Services,SHRIRAMFIN,EQ,INE721A01013
42
+ State Bank of India,Financial Services,SBIN,EQ,INE062A01020
43
+ Sun Pharmaceutical Industries Ltd.,Healthcare,SUNPHARMA,EQ,INE044A01036
44
+ Tata Consultancy Services Ltd.,Information Technology,TCS,EQ,INE467B01029
45
+ Tata Consumer Products Ltd.,Fast Moving Consumer Goods,TATACONSUM,EQ,INE192A01025
46
+ Tata Motors Ltd.,Automobile and Auto Components,TATAMOTORS,EQ,INE155A01022
47
+ Tata Steel Ltd.,Metals & Mining,TATASTEEL,EQ,INE081A01020
48
+ Tech Mahindra Ltd.,Information Technology,TECHM,EQ,INE669C01036
49
+ Titan Company Ltd.,Consumer Durables,TITAN,EQ,INE280A01028
50
+ UltraTech Cement Ltd.,Construction Materials,ULTRACEMCO,EQ,INE481G01011
51
+ Wipro Ltd.,Information Technology,WIPRO,EQ,INE075A01022
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ langchain
+ langchain-groq
+ sentence-transformers
+ langchainhub
+ faiss-cpu
+ gradio
+ gradio_client
+ duckduckgo-search
+ yfinance