Update app.py
app.py CHANGED

@@ -48,12 +48,12 @@ from langchain_community.vectorstores import FAISS
 embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
 
 
-
+groq_api_key = os.getenv('groq')
 
 # Define the LLM - We shall use ChatGroq of Groq Platform and LLama70B
 # This llm definition is redundant as now models will be chosen by user
 llm = ChatGroq(
-    api_key=
+    api_key=groq_api_key,
     model="llama3-70b-8192",
     # model = 'gemma-7b-it',
     temperature = 0
@@ -89,7 +89,7 @@ retriever2 = vectorstore1.as_retriever(search_type='mmr',search_kwargs={"k": 10}
 
 import os
 
-os.environ["OPENAI_API_KEY"] =
+os.environ["OPENAI_API_KEY"] = groq_api_key
 
 
 
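In short, the commit stops leaving the API key blank in app.py and instead reads it from the environment. Below is a minimal sketch of how the updated pieces fit together, assuming the Groq key is exposed to the Space as a secret / environment variable named 'groq' (as the diff suggests) and that ChatGroq is imported from the langchain_groq package; this is an illustration, not the full app.py.

# Sketch only: assumes a secret named 'groq' and the langchain_groq package.
import os

from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_groq import ChatGroq

# Read the key from the environment instead of hardcoding it in app.py.
groq_api_key = os.getenv('groq')
if groq_api_key is None:
    raise RuntimeError("Expected a secret named 'groq' containing the Groq API key")

embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# Default LLM; per the comments in the diff, this definition becomes redundant
# once the model is chosen by the user.
llm = ChatGroq(
    api_key=groq_api_key,
    model="llama3-70b-8192",
    temperature=0,
)

# The same key is also exported as OPENAI_API_KEY for components that expect it.
os.environ["OPENAI_API_KEY"] = groq_api_key

On Hugging Face Spaces the secret would typically be added under the Space's settings; when running locally, exporting an ordinary environment variable with the same name has the same effect.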