File size: 2,012 Bytes
ae5ad81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
import os
from huggingface_hub import InferenceClient

app = FastAPI()

# Serve static assets (CSS, JS) from ./static under the /static URL path.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# Hugging Face API token read from the environment; defaults to "" when unset
# (unauthenticated Inference API calls are heavily rate-limited).
HF_TOKEN = os.environ.get("HF_TOKEN", "")

@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Render and return the frontend page (templates/index.html)."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

@app.post("/analyze")
async def analyze_text(request: Request):
    """Classify the sentiment of the submitted text and generate a chat reply.

    Expects a JSON body of the form:
        {"text": str, "history": [{"role": str, "content": str}, ...]}

    Returns a JSON object with:
        reply     -- the chat model's answer
        toneLabel -- normalized sentiment label (NEGATIVE / NEUTRAL / POSITIVE)
        toneScore -- confidence score of the winning sentiment class
    """
    data = await request.json()
    user_text = data.get("text", "")

    # Run sentiment analysis via the HF Inference API.
    client = InferenceClient(token=HF_TOKEN)
    sentiment = client.text_classification(
        text=user_text,
        model="cardiffnlp/twitter-roberta-base-sentiment"
    )

    # Copy the history so we don't mutate the caller-supplied list in place.
    messages = list(data.get("history", []))
    messages.append({"role": "user", "content": user_text})

    # Pick the highest-scoring sentiment class.
    best_sentiment = max(sentiment, key=lambda x: x["score"])

    # BUG FIX: this model emits LABEL_0/LABEL_1/LABEL_2 (negative/neutral/positive
    # per the model card), not "NEGATIVE"/"POSITIVE" — the original comparisons
    # could never match. Normalize the label before branching.
    label_map = {"LABEL_0": "NEGATIVE", "LABEL_1": "NEUTRAL", "LABEL_2": "POSITIVE"}
    tone_label = label_map.get(best_sentiment["label"], best_sentiment["label"]).upper()

    # Steer the assistant with a system prompt when the tone is pronounced.
    if tone_label == "NEGATIVE" and best_sentiment["score"] > 0.6:
        messages.append({"role": "system", "content": "Der Patient zeigt starke negative Emotionen – schlage Schuldprojektion oder Verdrängung vor."})
    elif tone_label == "POSITIVE" and best_sentiment["score"] > 0.6:
        messages.append({"role": "system", "content": "Der Patient wirkt übertrieben positiv – möglicherweise Abwehrmechanismus durch Kompensation."})

    # NOTE(review): "gpt-3.5-turbo" is an OpenAI model id and is not served by
    # the HF Inference API — confirm and replace with an HF-hosted chat model
    # (e.g. an instruct model available to this token).
    response = client.chat_completion(
        model="gpt-3.5-turbo",
        messages=messages
    )

    # BUG FIX: chat_completion returns a ChatCompletionOutput; the reply text
    # lives at .choices[0].message.content — there is no .generated_text attribute.
    return {
        "reply": response.choices[0].message.content,
        "toneLabel": tone_label,
        "toneScore": best_sentiment["score"]
    }