Update app.py
app.py CHANGED
@@ -2,9 +2,10 @@ from fastapi import FastAPI
 from pydantic import BaseModel
 from typing import Optional
 
-from llama_index.core import
-from llama_index.
+from llama_index.core.settings import Settings
+from llama_index.core import Document
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.llms.llama_cpp import LlamaCPP
 from llama_index.core.node_parser import SemanticSplitterNodeParser
 
 app = FastAPI()
@@ -33,14 +34,25 @@ async def chunk_text(data: ChunkRequest):
     embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
 
     # ✅ AI service configuration
-    service_context = ServiceContext.from_defaults(
-        llm=llm,
-        embed_model=embed_model
-    )
+    # service_context = ServiceContext.from_defaults(
+    #     llm=llm,
+    #     embed_model=embed_model
+    # )
+
+
+    # ✅ New recommended approach: configure Settings globally
+    Settings.llm = llm
+    Settings.embed_model = embed_model
+
 
+
     try:
         # ✅ Smart semantic chunking
-        parser = SemanticSplitterNodeParser.from_defaults(service_context=service_context)
+        # parser = SemanticSplitterNodeParser.from_defaults(service_context=service_context)
+
+        # ✅ Call the parser without service_context
+
+        parser = SemanticSplitterNodeParser.from_defaults()
         nodes = parser.get_nodes_from_documents([Document(text=data.text)])
 
         return {
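For context, the commit moves from the deprecated ServiceContext to the global Settings object, the configuration path recommended in recent llama_index releases, and drops the service_context argument from the splitter call. Below is a minimal standalone sketch of that pattern, not the app's exact code: it assumes llama_index >= 0.10 with the llama-index-embeddings-huggingface package installed, passes embed_model to the parser explicitly so it does not depend on any global default, and omits the LLM because the semantic splitter only needs an embedding model.

```python
# Sketch of the Settings-based pattern the commit adopts (illustrative, not the app's handler).
from llama_index.core import Document, Settings
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Global default, replacing ServiceContext.from_defaults(embed_model=...)
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
Settings.embed_model = embed_model

# Passing embed_model explicitly keeps the parser independent of the global default.
parser = SemanticSplitterNodeParser.from_defaults(embed_model=embed_model)

# The input text here is a placeholder; the Space reads it from the request body.
nodes = parser.get_nodes_from_documents(
    [Document(text="Long input text that should be split into semantic chunks...")]
)
chunks = [node.get_content() for node in nodes]
print(len(chunks))
```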