Rajarshi-Roy-research committed on
Commit 892f454 · 1 Parent(s): bd1ef2f

fixed errors

Files changed (13)
  1. .gitattributes +1 -11
  2. Dockerfile +0 -33
  3. LICENSE +0 -24
  4. README.md +1 -12
  5. app copy.py +0 -45
  6. app.py +0 -67
  7. chainlit.md +0 -11
  8. demo_app.py +0 -314
  9. demo_chaillit_app.py +0 -61
  10. demo_main.py +0 -14
  11. requirements.txt +0 -18
  12. setup.py +0 -33
  13. template.py +0 -38
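
The "errors" fixed here are leftover Git merge-conflict markers: every hunk below deletes `<<<<<<<`, `=======`, and `>>>>>>>` lines, together with the duplicated text between them, that two earlier merges (e9cd6c4 and 77c2a5e) left behind. For reference, an unresolved conflict region looks like the sketch below; resolving it means keeping one side and deleting all three marker lines.

```text
<<<<<<< HEAD
faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
=======
faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
>>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0
```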
.gitattributes CHANGED
@@ -1,13 +1,3 @@
- <<<<<<< HEAD
- <<<<<<< HEAD
+
  faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
  resources/Guide-to-Litiaton-in-India.pdf filter=lfs diff=lfs merge=lfs -text
- =======
- faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
- resources/Guide-to-Litiaton-in-India.pdf filter=lfs diff=lfs merge=lfs -text
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0
- resources/Guide-to-Litigation-in-India.pdf filter=lfs diff=lfs merge=lfs -text
- =======
- faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
- resources/Guide-to-Litiaton-in-India.pdf filter=lfs diff=lfs merge=lfs -text
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
Dockerfile CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  FROM python:3.10-slim

  WORKDIR /app
@@ -19,40 +18,8 @@ ENV GOOGLE_API_KEY=$GOOGLE_API_KEY1
  ARG LANGCHAIN_API_KEY1
  ENV LANGCHAIN_API_KEY=$LANGCHAIN_API_KEY1

- <<<<<<< HEAD
  EXPOSE 8000

  ENTRYPOINT ["chainlit", "run"]
- =======
- EXPOSE 8501
-
- ENTRYPOINT ["streamlit", "run"]
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0
-
- =======
- FROM python:3.10-slim
-
- WORKDIR /app
-
- COPY . /app
-
-
- RUN apt-get update && apt-get install -y --no-install-recommends \
-     ca-certificates \
-     netbase \
-     && rm -rf /var/lib/apt/lists/*
-
- RUN pip3 install -r requirements.txt
-
- ARG GOOGLE_API_KEY1
- ENV GOOGLE_API_KEY=$GOOGLE_API_KEY1
-
- ARG LANGCHAIN_API_KEY1
- ENV LANGCHAIN_API_KEY=$LANGCHAIN_API_KEY1
-
- EXPOSE 8501
-
- ENTRYPOINT ["streamlit", "run"]
-
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
+
  CMD ["app.py"]
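
With the conflicts resolved, the image keeps the Chainlit variant: `ENTRYPOINT ["chainlit", "run"]` combined with `CMD ["app.py"]` makes the container start `chainlit run app.py` on the exposed port 8000. A hedged build-and-run sketch (the image tag and key values are placeholders, and Chainlit's own host/port flags may still be needed depending on its defaults):

```sh
# Build, passing the API keys the Dockerfile declares as build args
docker build -t legal-agent \
  --build-arg GOOGLE_API_KEY1="your-gemini-key" \
  --build-arg LANGCHAIN_API_KEY1="your-langsmith-key" .
# Map the exposed Chainlit port to the host
docker run -p 8000:8000 legal-agent
```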
LICENSE CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  MIT License

  Copyright (c) 2025 Rajarshi Roy
@@ -20,26 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
- =======
- MIT License
-
- Copyright (c) 2025 Rajarshi Roy
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
README.md CHANGED
@@ -1,7 +1,4 @@
- <<<<<<< HEAD
- =======
  ---
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
  title: Legal-Agent
  emoji: 🌖
  colorFrom: gray
@@ -10,8 +7,7 @@ sdk: docker
  sdk_version: 5.18.0
  app_file: app.py
  pinned: false
- <<<<<<< HEAD
-
+ ---

  # Legal Agent Codebase

@@ -68,11 +64,7 @@ The system follows a multi-stage workflow:

  The code is organized into several classes and functions:

- <<<<<<< HEAD
- * **`DocumentRetriever`:** Loads and interacts with the FAISS index, retrieving relevant documents based on a query.
- =======
  * **`FaissRetriever`:** Loads and interacts with the FAISS index, retrieving relevant documents based on a query.
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0

  * **`DocSummarizerPipeline`:** Summarizes retrieved documents using the Gemini model, generating a concise answer focused on the user's query. It uses a carefully crafted prompt to ensure the response is structured and informative.

@@ -307,6 +299,3 @@ Rajarshi Roy - [[email protected]](mailto:[email protected])
  This project is licensed under the MIT License. Feel free to modify and distribute it as per the terms of the license.

  I hope this README provides you with the necessary information to get started with the road to Generative AI with Google Gemini and Langchain.
- =======
- ---
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
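The `FaissRetriever` described in the README diff above maps directly onto LangChain's FAISS vector-store API, which this repo already uses elsewhere (see demo_app.py below). As a rough illustration only — the class shape, index path, and `k` default are assumptions, not the repository's actual code — loading and querying the index could look like:

```python
# Hypothetical sketch of the FaissRetriever described in the README;
# names and defaults are assumptions, not the repository's actual code.
from langchain_community.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings


class FaissRetriever:
    """Loads a local FAISS index and retrieves documents relevant to a query."""

    def __init__(self, index_dir: str = "faiss_index"):
        embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
        # Local pickled indexes require opting in to deserialization.
        self.db = FAISS.load_local(
            index_dir, embeddings, allow_dangerous_deserialization=True
        )

    def retrieve(self, query: str, k: int = 4):
        # Return the k most similar chunks for the downstream summarizer.
        return self.db.as_retriever(search_kwargs={"k": k}).invoke(query)
```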
app copy.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  import streamlit as st
  from legal_agent.components.full_workflow import run_user_query

@@ -41,47 +40,3 @@ if st.button("Send"):
          st.experimental_rerun()
      else:
          st.error("Please enter a valid query.")
- =======
- import streamlit as st
- from legal_agent.components.full_workflow import run_user_query
-
- # Initialize session state for conversation history
- if "conversation" not in st.session_state:
-     st.session_state.conversation = []
-
- st.title("Legal Agent Chat App")
-
-
- # Display conversation history
- def display_conversation():
-     for chat in st.session_state.conversation:
-         if chat["role"] == "user":
-             st.markdown(f"**User:** {chat['content']}")
-         else:
-             st.markdown(f"**Legal Agent:** {chat['content']}")
-
-
- display_conversation()
-
- # Input area for new query
- user_input = st.text_input("Enter your legal query:")
-
- if st.button("Send"):
-     if user_input.strip():
-         # Append user message to conversation
-         st.session_state.conversation.append({"role": "user", "content": user_input})
-
-         # Run the legal agent query workflow
-         result = run_user_query(user_input)
-         agent_response = result.get("response", "No response received.")
-
-         # Append agent response to conversation
-         st.session_state.conversation.append(
-             {"role": "assistant", "content": agent_response}
-         )
-
-         # Clear the text input by rerunning the app
-         st.experimental_rerun()
-     else:
-         st.error("Please enter a valid query.")
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
app.py CHANGED
@@ -1,15 +1,5 @@
- <<<<<<< HEAD
  import chainlit as cl
  from legal_agent.components.full_workflow import run_user_query
- <<<<<<< HEAD
- import os
- =======
-
- # from QA_app.components.data_querying import user_query
- import chainlit as cl
-
- # user_query
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0


  async def user_query_func(user_question):
@@ -35,8 +25,6 @@ async def main(message: cl.Message):
      # response = await user_query_func("What happended to the birds")
      response = await user_query_func(user_question)
      print(user_question, "see")
- <<<<<<< HEAD
- =======
      # user_query = cl.make_async(user_query)

      # await user_query("What happended to the birds")
@@ -46,61 +34,6 @@ async def main(message: cl.Message):
      # callback_handler = LangchainCallbackHandler(stream_final_answer=True)
      # response = await cl.make_async(user_query)(user_question)
      # response = await cl.make_async(user_query)(user_question)
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0

      # await message.reply(response)
      await cl.Message(content=response["response"]).send()
-
-
- # # Run the Chainlit app
- # cl.run()
- =======
- import chainlit as cl
- from legal_agent.components.full_workflow import run_user_query
-
- # from QA_app.components.data_querying import user_query
- import chainlit as cl
-
- # user_query
-
-
- async def user_query_func(user_question):
-     response = run_user_query(user_question)
-     # Replace this with your actual logic for processing the user query
-     # It could involve interacting with an LLM, searching web documents, etc.
-     # For illustration purposes, let's just return a simple response
-     return response
-
-
- @cl.on_chat_start
- def start():
-     # user_query
-
-     print("Chat started!")
-
-
- @cl.on_message
- async def main(message: cl.Message):
-     # user_query
-     user_question = message.content
-     # response = user_query(user_question)
-     # response = await user_query_func("What happended to the birds")
-     response = await user_query_func(user_question)
-     print(user_question, "see")
-     # user_query = cl.make_async(user_query)
-
-     # await user_query("What happended to the birds")
-     # print(user_question, "see22222222")
-
-     # Use LangchainCallbackHandler to capture the final answer
-     # callback_handler = LangchainCallbackHandler(stream_final_answer=True)
-     # response = await cl.make_async(user_query)(user_question)
-     # response = await cl.make_async(user_query)(user_question)
-
-     # await message.reply(response)
-     await cl.Message(content=response["response"]).send()
-
-
- # # Run the Chainlit app
- # cl.run()
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
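Stripped of the commented-out experiments, the resolved app.py boils down to a small Chainlit message handler. This condensed sketch (not the file's exact contents) shows the surviving flow:

```python
import chainlit as cl
from legal_agent.components.full_workflow import run_user_query


@cl.on_message
async def main(message: cl.Message):
    # run_user_query returns a dict whose "response" field holds the answer
    result = run_user_query(message.content)
    await cl.Message(content=result["response"]).send()
```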
chainlit.md CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  # Welcome to Game Recommendation APP! 🚀🤖

  **Objective: Let the Games Begin!**
@@ -7,14 +6,4 @@ Welcome to the Game Recommendation Party! 🎉 Our mission is simple: to make ga

  Picture this: a magical realm where every gamer finds their perfect match, where the journey to discovering new games is filled with laughter, surprises, and endless joy. Our objective? To sprinkle a dash of fun into the gaming universe and ignite the spark of adventure in every player's heart.

- =======
- # Welcome to Game Recommendation APP! 🚀🤖
-
- **Objective: Let the Games Begin!**
-
- Welcome to the Game Recommendation Party! 🎉 Our mission is simple: to make gaming discovery as thrilling as playing the games themselves! With our vibrant and fun-loving Game Recommendation App, we're here to shake up the gaming world and inject a dose of excitement into every recommendation.
-
- Picture this: a magical realm where every gamer finds their perfect match, where the journey to discovering new games is filled with laughter, surprises, and endless joy. Our objective? To sprinkle a dash of fun into the gaming universe and ignite the spark of adventure in every player's heart.
-
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
  So, gear up, grab your controller, and get ready to embark on an epic quest through the world of gaming like never before. Let's turn every recommendation into a memorable adventure, where the only limit is your imagination! 🚀✨
demo_app.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  # Importing Important libraries

  import streamlit as st
@@ -310,316 +309,3 @@ if st.session_state.get("run_id"):

          else:
              st.warning("Invalid feedback score.")
- =======
- # Importing Important libraries
-
- import streamlit as st
- from pathlib import Path
-
- import os
-
- import google.generativeai as genai
-
- from langchain_community.chat_message_histories.streamlit import (
-     StreamlitChatMessageHistory,
- )
-
-
- from datetime import datetime
-
- from langchain.memory.buffer import ConversationBufferMemory
- from langchain.schema.runnable import RunnableMap
-
- from langchain_core.prompts import ChatPromptTemplate
- from langchain_core.prompts import MessagesPlaceholder
-
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain.callbacks.tracers.langchain import wait_for_all_tracers
-
- import streamlit as st
- from streamlit_feedback import streamlit_feedback
-
-
- from langsmith import Client
-
- from langchain_core.tracers.context import collect_runs
-
-
- from dotenv import load_dotenv
-
- from langchain_community.vectorstores import FAISS
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain_google_genai import GoogleGenerativeAIEmbeddings
-
- ## Loading APIs
-
- load_dotenv()
-
- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
-
- gemini_api_key = os.getenv("GOOGLE_API_KEY")
-
-
- os.environ["LANGCHAIN_PROJECT"] = "GAME RECOMMENDATION"  # Set your custom project name
-
- os.environ["LANGCHAIN_TRACING_V2"] = "true"
- os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
- langchain_api_key = os.getenv("LANGCHAIN_API_KEY")
-
- # Update with your API URL if using a hosted instance of Langsmith.
- langchain_endpoint = os.environ["LANGCHAIN_ENDPOINT"] = (
-     "https://api.smith.langchain.com"
- )
-
- # Used LLM model
-
-
- # Adding an event loop
- import asyncio
- import aiohttp
-
-
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
-
-
- model = ChatGoogleGenerativeAI(
-     model="gemini-1.5-pro-latest",
-     api_key=gemini_api_key,
-     temperature=0.3,
-     convert_system_message_to_human=True,
- )
-
-
- # Configuring memory
- memory = ConversationBufferMemory(
-     chat_memory=StreamlitChatMessageHistory(key="langchain_messages"),
-     return_messages=True,
-     memory_key="chat_history",
- )
-
-
- # Load Vector DB
- new_db = FAISS.load_local(
-     "faiss_index", embeddings, allow_dangerous_deserialization=True
- )
-
- # Main retriever
- retriever = new_db.as_retriever()
-
- # Configuring Langsmith Client
- client = Client(api_url=langchain_endpoint, api_key=langchain_api_key)
-
-
- # Introducing try catch block in case you don't have a dataset with good feed back examples
- try:
-     # Getting best feedback examples to save in the memory context
-     examples = client.list_examples(
-         dataset_name="Feedbacks"
-     )  # Choose your dataset_name here
-
-     my_examples = []
-
-     for i in examples:
-         print(i.inputs)
-         print(i.outputs["output"]["content"])
-         print("\n\n--------\n\n")
-         my_examples.append(
-             (i.inputs["input"], {"output": i.outputs["output"]["content"]})
-         )
- except:
-     my_examples = []
-
-
- my_examples = my_examples[:2]
-
- # Configuring our runnablemap
- ingress = RunnableMap(
-     {
-         "input": lambda x: x["input"],
-         "chat_history": lambda x: memory.load_memory_variables(x)["chat_history"],
-         "time": lambda _: str(datetime.now()),
-         "context": lambda x: retriever.get_relevant_documents(x["input"]),
-         "examples": lambda x: my_examples,
-     }
- )
-
- # Making the prompt template
- prompt = ChatPromptTemplate.from_messages(
-     [
-         (
-             "system",
-             "Only discuss games. You are a GAME RECOMMENDATION system assistant. Be humble, greet users nicely, and answer their queries."
-             """
-             "Instructions":
-             "Regardless of the input, always adhere to the context provided."
-             "You can only make conversations based on the provided context. If a response cannot be formed strictly using the context, politely say you dont have knowledge about that topic."
-             "Use the Context section to provide accurate answers, as if you knew this information innately."
-             "If unsure, state that you don't know."
-
-             "Context": {context}
-
-             "Examples of Human feedback":
-             {examples},
-             """,
-             # "system",
-             # "Only and Only talk about games, nothing else, your knowledge is constraint games"
-             # "You are a GAME RECOMMENDATION system assistant. You are humble AI. Greet the user nicely and answer their queries"
-             # """
-             # Use the information from the Context section to provide accurate answers but act as if you knew this information innately.
-             # If unsure, simply state that you don't know.
-             # Context: {context}
-             # Here are some impressive examples of Human feedback, Do your best to try to generate these type of answer format for the specific format of questions
-             # The examples are listed below :
-             # {examples}
-             # Assistant:""",
-         ),
-         MessagesPlaceholder(variable_name="chat_history"),
-         ("human", "{input}"),
-     ]
- )
-
- llm = model
-
- # Our final chain
- chain = ingress | prompt | llm
-
-
- # Initialize State
- if "trace_link" not in st.session_state:
-     st.session_state.trace_link = None
- if "run_id" not in st.session_state:
-     st.session_state.run_id = None
-
-
- # Sidebar to give option for Clearing message history
- if st.sidebar.button("Clear message history"):
-     print("Clearing message history")
-     memory.clear()
-     st.session_state.trace_link = None
-     st.session_state.run_id = None
-
-
- # When we get response from the Chatbot, then only we can see this Trace link
- if st.session_state.trace_link:
-     st.sidebar.markdown(
-         f'<a href="{st.session_state.trace_link}" target="_blank"><button>Latest Trace: 🛠️</button></a>',
-         unsafe_allow_html=True,
-     )
-
- st.header("Hey Gamers, I am a Game Recommender 🤖", divider="rainbow")
-
- for msg in st.session_state.langchain_messages:
-     avatar = "🤖" if msg.type == "ai" else None
-     with st.chat_message(msg.type, avatar=avatar):
-         st.markdown(msg.content)
-
-
- # The main chatbot configuration to get desired out and create runs for Langsmith
- if prompt := st.chat_input(placeholder="Ask me a question!"):
-     st.chat_message("user").write(prompt)
-     with st.chat_message("assistant", avatar="🤖"):
-         message_placeholder = st.empty()
-         full_response = ""
-         print("in chat here")
-
-         # Getting the input
-         input_dict = {"input": prompt}
-
-         # Displaying the response from chatbot and collecting runs
-         with collect_runs() as cb:
-             for chunk in chain.stream(input_dict, config={"tags": ["Streamlit Chat"]}):
-                 full_response += chunk.content
-                 message_placeholder.markdown(full_response + "▌")
-             memory.save_context(input_dict, {"output": full_response})
-
-             # storing the run id in streamlit session
-             ## Since the runnable sequence would come after retriever I have chosen `1` instead on `0`
-             run_id = cb.traced_runs[1].id
-
-             st.session_state.run_id = run_id
-
-         wait_for_all_tracers()
-         # Requires langsmith >= 0.0.19
-
-         # Getting the Trace link
-         url = client.share_run(run_id)
-
-         st.session_state.trace_link = url
-
-         message_placeholder.markdown(full_response)
-
- # Checking if we have messages in chat
- has_chat_messages = len(st.session_state.get("langchain_messages", [])) > 0
-
-
- # Only show the feedback toggle if there are chat messages
- if has_chat_messages:
-     feedback_option = (
-         "faces" if st.toggle(label="`Thumbs` ⇄ `Faces`", value=False) else "thumbs"
-     )
-
- else:
-     pass
-
- if st.session_state.get("run_id"):
-     feedback = streamlit_feedback(
-         feedback_type=feedback_option,  # Use the selected feedback option
-         optional_text_label="[Optional] Please provide an explanation",  # Adding a label for optional text input
-         key=f"feedback_{st.session_state.run_id}",
-         align="flex-start",
-     )
-
-     # Define score mappings for both "thumbs" and "faces" feedback systems
-     score_mappings = {
-         "thumbs": {"👍": 1, "👎": 0},
-         "faces": {"😀": 1, "🙂": 0.75, "😐": 0.5, "🙁": 0.25, "😞": 0},
-     }
-
-     # Get the score mapping based on the selected feedback option
-     scores = score_mappings[feedback_option]
-
-     if feedback:
-         # Get the score from the selected feedback option's score mapping
-         score = scores.get(feedback["score"])
-
-         if score is not None:
-             # Formulate feedback type string incorporating the feedback option and score value
-             feedback_type_str = f"{feedback_option} {feedback['score']}"
-
-             # Record the feedback with the formulated feedback type string and optional comment
-             feedback_record = client.create_feedback(
-                 st.session_state.run_id,
-                 feedback_type_str,  # Updated feedback type
-                 score=score,
-                 comment=feedback.get("text"),
-             )
-             st.session_state.feedback = {
-                 "feedback_id": str(feedback_record.id),
-                 "score": score,
-             }
-
-             # # Incase you want to add this run with feedback to simultaneously add to a dataset
-             # run_id = st.session_state.get("run_id")
-             # selected_runs = client.list_runs(id=[run_id])
-
-             # for run in tqdm(selected_runs):
-
-             #     # print(run, "lets see")
-             #     print(run.inputs)
-             #     print(run.outputs)
-             #     print(run.extra)
-             #     print(run.feedback_stats)
-
-             # client.create_examples(
-             #     inputs=[run.inputs],
-             #     outputs=[run.outputs],
-             #     feedback_stats=[run.feedback_stats],
-             #     dataset_id=<your-dataset-id>,
-             # )
-
-         else:
-             st.warning("Invalid feedback score.")
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
demo_chaillit_app.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  # from QA_app.components.data_querying import user_query
  from chainlit import on_chat_start, on_message, LangchainCallbackHandler
  import chainlit as cl
@@ -57,63 +56,3 @@ async def main(message: cl.Message):

  # # Run the Chainlit app
  # cl.run()
- =======
- # from QA_app.components.data_querying import user_query
- from chainlit import on_chat_start, on_message, LangchainCallbackHandler
- import chainlit as cl
- from main_app_deploy.components.data_querying import my_query
-
-
- import os
-
- os.environ["LITERAL_API_KEY"] = os.getenv("LITERAL_API_KEY")
-
- os.environ["LANGCHAIN_PROJECT"] = "GAME RECOMMENDATION"
-
- os.environ["LANGCHAIN_TRACING_V2"] = "true"
- os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
-
-
- # user_query
-
-
- async def user_query_func(user_question):
-     response = my_query(user_question)
-     # Replace this with your actual logic for processing the user query
-     # It could involve interacting with an LLM, searching web documents, etc.
-     # For illustration purposes, let's just return a simple response
-     return response
-
-
- @cl.on_chat_start
- def start():
-     # user_query
-
-     print("Chat started!")
-
-
- @cl.on_message
- async def main(message: cl.Message):
-     # user_query
-     user_question = message.content
-     # response = user_query(user_question)
-     # response = await user_query_func("What happended to the birds")
-     response = await user_query_func(user_question)
-     print(user_question, "see")
-     # user_query = cl.make_async(user_query)
-
-     # await user_query("What happended to the birds")
-     # print(user_question, "see22222222")
-
-     # Use LangchainCallbackHandler to capture the final answer
-     # callback_handler = LangchainCallbackHandler(stream_final_answer=True)
-     # response = await cl.make_async(user_query)(user_question)
-     # response = await cl.make_async(user_query)(user_question)
-
-     # await message.reply(response)
-     await cl.Message(content=response).send()
-
-
- # # Run the Chainlit app
- # cl.run()
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
demo_main.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  from main_app_deploy.components.data_querying import my_query
  import os

@@ -10,16 +9,3 @@ os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
  ans = my_query("can you reccomend me some fantasy games? about 2 games")

  print(ans)
- =======
- from main_app_deploy.components.data_querying import my_query
- import os
-
- os.environ["LANGCHAIN_PROJECT"] = "GAME RECOMMENDATION"
-
- os.environ["LANGCHAIN_TRACING_V2"] = "true"
- os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
-
- ans = my_query("can you reccomend me some fantasy games? about 2 games")
-
- print(ans)
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
requirements.txt CHANGED
@@ -4,13 +4,9 @@ chromadb
  duckduckgo_search==6.3.4
  fastapi
  faiss-cpu
- <<<<<<< HEAD
- google-generativeai
- =======
  # flashrank[listwise]
  google-generativeai
  # google-play-scraper
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0
  IPython
  ipykernel
  langchain==0.3.7
@@ -18,19 +14,6 @@ langchain-community==0.3.7
  langchain_core
  langchain-docling
  langchain_google_genai==2.0.4
- <<<<<<< HEAD
- langgraph==0.2.48
- langserve
- langsmith
- numpy
- pandas>=1.3.6
- pypdf
- python-dotenv
- regex
- streamlit
- umap-learn
- unstructured[pdf]==0.7.12
- =======
  # langchain_huggingface
  langgraph==0.2.48
  langserve
@@ -45,5 +28,4 @@ streamlit
  unstructured[pdf]==0.7.12
  # sse_starlette
  # uvicorn
- >>>>>>> e9cd6c40756e45d6beab9c9d3c151efcb2a657a0
  -e .
setup.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  import setuptools


@@ -29,35 +28,3 @@ setuptools.setup(
      package_dir={"": "src"},
      packages=setuptools.find_packages(where="src"),
  )
- =======
- import setuptools
-
-
- with open("README.md", "r", encoding="utf-8") as f:
-     long_description = f.read()
-
-
- __version__ = "0.0.0.1"
-
- REPO_NAME = "legal-agent"
- AUTHOR_USER_NAME = "Rajarshi12321"
- SRC_REPO = "legal_agent"
- AUTHOR_EMAIL = "[email protected]"
-
-
- setuptools.setup(
-     name=SRC_REPO,
-     version=__version__,
-     author=AUTHOR_USER_NAME,
-     author_email=AUTHOR_EMAIL,
-     description="A small python package for game recommendation using RAG model",
-     long_description=long_description,
-     long_description_content="text/markdown",
-     url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}",
-     project_urls={
-         "Bug Tracker": f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}/issues",
-     },
-     package_dir={"": "src"},
-     packages=setuptools.find_packages(where="src"),
- )
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef
template.py CHANGED
@@ -1,4 +1,3 @@
- <<<<<<< HEAD
  import os
  from pathlib import Path
  import logging
@@ -34,40 +33,3 @@ for filepath in list_of_files:

      else:
          logging.info(f"{filename} is already exists")
- =======
- import os
- from pathlib import Path
- import logging
-
- # logging string
- logging.basicConfig(level=logging.INFO, format="[%(asctime)s]: %(message)s:")
-
- project_name = "legal_agent"
-
- list_of_files = [
-     ".github/workflows/.gitkeep",
-     f"src/{project_name}/__init__.py",
-     f"src/{project_name}/components/__init__.py",
-     f"src/{project_name}/utils/__init__.py",
-     "requirements.txt",
-     "setup.py",
-     "research/trials.ipynb",
- ]
-
-
- for filepath in list_of_files:
-     filepath = Path(filepath)
-     filedir, filename = os.path.split(filepath)
-
-     if filedir != "":
-         os.makedirs(filedir, exist_ok=True)
-         logging.info(f"Creating directory; {filedir} for the file: {filename}")
-
-     if (not os.path.exists(filepath)) or (os.path.getsize(filepath) == 0):
-         with open(filepath, "w") as f:
-             pass
-         logging.info(f"Creating empty file: {filepath}")
-
-     else:
-         logging.info(f"{filename} is already exists")
- >>>>>>> 77c2a5e7f373b9fd0f02cb3a080cbf240a85f4ef