Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -4,42 +4,44 @@ from fastapi import FastAPI
 from pydantic import BaseModel
 from typing import Optional
 
-# LlamaIndex (>= 0.10.0)
+# ✅ LlamaIndex modules (version >= 0.10.0)
 from llama_index.core import Document
 from llama_index.core.settings import Settings
-from llama_index.core.node_parser import SemanticSplitterNodeParser
+from llama_index.core.node_parser import SemanticSplitterNodeParser
+from llama_index.core.text_splitter import RecursiveTextSplitter
 from llama_index.llms.llama_cpp import LlamaCPP
 from llama_index.core.base.llms.base import BaseLLM
 
-# Local embedding (transformers + torch)
+# ✅ Local embedding (transformers + torch)
 from transformers import AutoTokenizer, AutoModel
 import torch
 import torch.nn.functional as F
-
 import os
 
+# ✅ FastAPI app initialization
 app = FastAPI()
 
-# ✅ Configuration
+# ✅ Hugging Face cache configuration (important on HF Spaces)
 CACHE_DIR = "/app/cache"
 os.environ["HF_HOME"] = CACHE_DIR
 os.environ["TRANSFORMERS_CACHE"] = CACHE_DIR
 os.environ["HF_MODULES_CACHE"] = CACHE_DIR
 os.environ["HF_HUB_CACHE"] = CACHE_DIR
 
-# ✅
+# ✅ Choice of dense embedding model (e.g. BGE-small)
 MODEL_NAME = "BAAI/bge-small-en-v1.5"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, cache_dir=CACHE_DIR)
 model = AutoModel.from_pretrained(MODEL_NAME, cache_dir=CACHE_DIR)
 
+# ✅ Normalized embedding function (dense vectorization)
 def get_embedding(text: str):
     with torch.no_grad():
         inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
         outputs = model(**inputs)
-        embeddings = outputs.last_hidden_state[:, 0] #
+        embeddings = outputs.last_hidden_state[:, 0] # take the [CLS] token
     return F.normalize(embeddings, p=2, dim=1).squeeze().tolist()
 
-# ✅ Data format
+# ✅ Format of the data sent to the API
 class ChunkRequest(BaseModel):
     text: str
     max_tokens: Optional[int] = 1000
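The embedding helper above does CLS-token pooling followed by L2 normalization, so two embeddings can be compared with a plain dot product. A minimal standalone sanity check of that scheme (same BAAI/bge-small-en-v1.5 checkpoint as in the diff; the 384-dimension figure is what this model normally produces and is stated here as an assumption):

# Standalone sanity check for the CLS-pooling + L2-normalization scheme used above.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

tok = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")
mdl = AutoModel.from_pretrained("BAAI/bge-small-en-v1.5")

def embed(text: str) -> torch.Tensor:
    with torch.no_grad():
        batch = tok(text, return_tensors="pt", truncation=True, padding=True)
        out = mdl(**batch)
    # Take the [CLS] vector and scale it to unit length.
    return F.normalize(out.last_hidden_state[:, 0], p=2, dim=1).squeeze(0)

a = embed("Semantic chunking splits text on meaning.")
b = embed("Chunking by meaning rather than by length.")
print(a.shape)        # expected: torch.Size([384]) for bge-small
print(float(a @ b))   # cosine similarity, since both vectors are unit-norm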
@@ -49,12 +51,13 @@ class ChunkRequest(BaseModel):
     source: Optional[str] = None
     type: Optional[str] = None
 
+# ✅ API route for semantic chunking
 @app.post("/chunk")
 async def chunk_text(data: ChunkRequest):
     try:
         print(f"\n✅ Text received ({len(data.text)} characters): {data.text[:200]}...", flush=True)
 
-        # ✅ Load the remote GGUF model
+        # ✅ Load the remote GGUF model with LlamaCPP (CPU friendly)
         llm = LlamaCPP(
             model_url="https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q4_K_M.gguf",
             temperature=0.1,
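The hunk cuts off after temperature, so the remaining LlamaCPP arguments are not visible in the diff. For orientation, a typical construction of llama-index's LlamaCPP wrapper looks roughly like the sketch below; only model_url and temperature come from the commit, the other parameters are illustrative defaults, llama-cpp-python must be installed, and the first call downloads a multi-gigabyte GGUF file into the cache.

# Hedged sketch: typical LlamaCPP construction (parameters beyond model_url and
# temperature are illustrative, not taken from this commit).
from llama_index.llms.llama_cpp import LlamaCPP

llm = LlamaCPP(
    model_url=(
        "https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/"
        "resolve/main/codellama-7b-instruct.Q4_K_M.gguf"
    ),
    temperature=0.1,
    max_new_tokens=256,                 # illustrative
    context_window=3900,                # illustrative
    model_kwargs={"n_gpu_layers": 0},   # CPU-only, e.g. on a free HF Space
    verbose=True,
)
print(llm.complete("Say hello in one word.").text)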
@@ -66,17 +69,17 @@ async def chunk_text(data: ChunkRequest):
 
         print("✅ LLM model loaded successfully!")
 
-        # ✅ Wrapper
+        # ✅ Embedding wrapper compatible with LlamaIndex
         class SimpleEmbedding:
             def get_text_embedding(self, text: str):
                 return get_embedding(text)
 
-        # ✅ Configuration
-        assert isinstance(llm, BaseLLM), "❌ L
+        # ✅ Global LlamaIndex configuration
+        assert isinstance(llm, BaseLLM), "❌ The LLM object is not compatible with LlamaIndex"
         Settings.llm = llm
         Settings.embed_model = SimpleEmbedding()
 
-        print("✅ LLM and embedding configuration complete. Initializing the Semantic Splitter..."
+        print("✅ LLM and embedding configuration complete. Initializing the Semantic Splitter...")
 
         parser = SemanticSplitterNodeParser.from_defaults(llm=llm)
         doc = Document(text=data.text)
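A hedged note on the splitter setup: in the llama-index 0.10+ releases, SemanticSplitterNodeParser is driven by an embedding model rather than by the LLM, and Settings.embed_model normally expects a BaseEmbedding implementation rather than an arbitrary wrapper class. A sketch of the embedding-based construction, assuming the optional llama-index-embeddings-huggingface package is installed (this is an assumption about the environment, not what this commit does):

# Sketch under the assumption of llama-index >= 0.10 with the
# llama-index-embeddings-huggingface extra installed.
from llama_index.core import Document
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
parser = SemanticSplitterNodeParser.from_defaults(
    embed_model=embed_model,
    buffer_size=1,                       # sentences grouped per embedding window
    breakpoint_percentile_threshold=95,  # where to cut between semantic groups
)
nodes = parser.get_nodes_from_documents([Document(text="First topic. Second topic.")])
print(len(nodes))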
@@ -85,15 +88,14 @@ async def chunk_text(data: ChunkRequest):
             nodes = parser.get_nodes_from_documents([doc])
             print(f"✅ Semantic Splitter: {len(nodes)} chunks generated")
             if not nodes:
-                raise ValueError("No chunk produced by
-
+                raise ValueError("No chunk produced by SemanticSplitter")
         except Exception as e:
             print(f"⚠️ Falling back to RecursiveTextSplitter because of: {e}")
             splitter = RecursiveTextSplitter(chunk_size=data.max_tokens, chunk_overlap=data.overlap)
             nodes = splitter.get_nodes_from_documents([doc])
             print(f"♻️ Recursive Splitter: {len(nodes)} chunks generated")
 
-        # ✅
+        # ✅ Structured result for n8n or any other HTTP client
         return {
             "chunks": [node.text for node in nodes],
             "metadatas": [node.metadata for node in nodes],
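The fallback path imports RecursiveTextSplitter from llama_index.core.text_splitter, a name that resembles LangChain's RecursiveCharacterTextSplitter; the size-based splitter that ships with llama-index core is SentenceSplitter. A hedged sketch of an equivalent fallback using SentenceSplitter, where chunk_size and chunk_overlap mirror the data.max_tokens and data.overlap fields used in the route (treat the substitution as an assumption, not as what the commit ships):

# Hedged alternative fallback using llama-index's built-in SentenceSplitter;
# chunk_size / chunk_overlap mirror data.max_tokens / data.overlap from the route.
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter

splitter = SentenceSplitter(chunk_size=1000, chunk_overlap=100)
nodes = splitter.get_nodes_from_documents([Document(text="Some long text. " * 200)])
print(f"♻️ SentenceSplitter: {len(nodes)} chunks generated")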
@@ -101,13 +103,14 @@ async def chunk_text(data: ChunkRequest):
             "titre": data.titre,
             "source": data.source,
             "type": data.type,
-            "error": None #
+            "error": None # ← used by n8n to signal "no error"
         }
 
     except Exception as e:
         print(f"❌ Critical error: {e}")
         return {"error": str(e)}
 
+# ✅ Local launch (optional on HF Spaces)
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run("app:app", host="0.0.0.0", port=7860)
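Once the Space builds, the /chunk route can be exercised with any HTTP client; the response carries chunks, metadatas and an error field that n8n checks. A minimal client sketch, where the base URL is a placeholder and only fields visible in the diff are sent:

# Minimal client for the /chunk endpoint; BASE_URL is a placeholder for the Space URL.
import requests

BASE_URL = "http://localhost:7860"  # placeholder; replace with the deployed Space URL
payload = {
    "text": "Semantic chunking groups sentences that talk about the same thing. " * 20,
    "max_tokens": 500,
    "source": "example.txt",
    "type": "documentation",
}
resp = requests.post(f"{BASE_URL}/chunk", json=payload, timeout=600)
data = resp.json()
if data.get("error"):
    print("Server reported an error:", data["error"])
else:
    print(f"{len(data['chunks'])} chunks returned")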