Changes to main.py
main.py (changed)
@@ -1,8 +1,9 @@
 import os
 
 import uvicorn
+import torch
 from fastapi import FastAPI, HTTPException
-from fastapi.responses import Response
+from fastapi.responses import JSONResponse, Response
 from pydantic import BaseModel
 from transformers import pipeline
 
@@ -76,10 +77,17 @@ async def generate_text(request: GenerationRequest):
             temperature=request.temperature,
             top_p=request.top_p,
             do_sample=True,  # Important for sampling with temperature
+            return_full_text=False  # Add this parameter so the model does not return the prompt as well
         )
         # The pipeline returns a list of dicts; take the first result
         #gen_text = {"generated_text": generated_text[0]["generated_text"]}
-
+
+        response_data = {"generated_text": generated_text[0]["generated_text"]}
+        return JSONResponse(
+            content=response_data,
+            media_type="application/json; charset=utf-8"
+        )
+        #return Response(content=generated_text[0]["generated_text"], media_type="text/plain; charset=utf-8")
         # return {"generated_text": generated_text[0]["generated_text"]}
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error while generating text: {e}")