Update app.py
Browse files
app.py
CHANGED
# NOTE(review): SOURCE is a GitHub diff view of app.py rendered as plain text.
# Reconstructed below is the post-change state of the two hunks shown in the
# diff, with two correctness fixes applied (marked FIX below).

from llama_index.embeddings import OpenAIEmbedding
from llama_index.ingestion import IngestionPipeline
from pinecone.grpc import PineconeGRPC
from pinecone import ServerlessSpec
from llama_index.vector_stores import PineconeVectorStore
from llama_index import VectorStoreIndex
from llama_index.retrievers import VectorIndexRetriever

# ... app.py lines between the two hunks are unchanged and not visible in the
# diff (they define openai_api_key, pinecone_api_key, index_name, and
# `client = OpenAI(api_key=openai_api_key)` per the @@ hunk context) ...

# Initialize connection to Pinecone
pc = PineconeGRPC(api_key=pinecone_api_key)

# Define the spec for the Pinecone index.
# FIX: the diff passed `replicas=1, pod_type="p1"` to ServerlessSpec, but those
# are PodSpec constructor arguments -- ServerlessSpec(replicas=..., pod_type=...)
# raises TypeError. ServerlessSpec takes `cloud` and `region`. Adjust the region
# to your project, or switch to
#   PodSpec(environment="...", pod_type="p1", replicas=1)
# if a pod-based index was actually intended.
spec = ServerlessSpec(
    cloud="aws",
    region="us-east-1",
)

# Initialize your index. dimension=1536 matches OpenAI's
# text-embedding-ada-002 output size used by OpenAIEmbedding above.
# FIX: in the v3+ Pinecone client (the one that provides ServerlessSpec),
# pc.list_indexes() returns an IndexList object, not a list of name strings;
# test membership against .names() or the check never matches.
if index_name not in pc.list_indexes().names():
    pc.create_index(name=index_name, dimension=1536, spec=spec)

pinecone_index = pc.Index(index_name)