Update app.py
app.py CHANGED
@@ -6,9 +6,13 @@ from all_models import models
 from io import BytesIO
 from PIL import Image
 from fastapi import FastAPI, Request
+from translatepy import Translator
 
 css_code = os.getenv("DazDinGo_CSS")
 
+# Initialize translator
+translator = Translator()
+
 # Load models
 models_load = {}
 for model in models:
@@ -22,9 +26,11 @@ app = FastAPI()
 def gen_image(model_str, prompt):
     if model_str == 'NA':
         return None
+    # Translate prompt to English
+    translated_prompt = str(translator.translate(prompt, 'English'))
     noise = str(randint(0, 4294967296))
     klir = '| ultra detail, ultra elaboration, ultra quality, perfect'
-    return models_load[model_str](f'{prompt} {klir} {noise}')
+    return models_load[model_str](f'{translated_prompt} {klir} {noise}')
 
 def image_to_base64(image):
     buffered = BytesIO()
@@ -45,7 +51,9 @@ async def api_generate(request: Request):
     if model not in models:
         return {"error": "Model not found"}
 
-    image = gen_image(model, prompt)
+    # Translate prompt to English for API endpoint too
+    translated_prompt = str(translator.translate(prompt, 'English'))
+    image = gen_image(model, translated_prompt)
     if image is None:
         return {"error": "Image generation failed"}
 
@@ -54,7 +62,8 @@ async def api_generate(request: Request):
     return {
         "status": "success",
        "model": model,
-        "prompt": prompt,
+        "original_prompt": prompt,
+        "translated_prompt": translated_prompt,
         "image_base64": base64_str,
         "image_format": "jpeg"
     }
@@ -102,7 +111,9 @@ def make_me():
     json_output = gr.JSON(label="API Response")
 
     def generate_wrapper(model_str, prompt):
-        image = gen_image(model_str, prompt)
+        # Translate prompt to English
+        translated_prompt = str(translator.translate(prompt, 'English'))
+        image = gen_image(model_str, translated_prompt)
         if image is None:
             return None, {"error": "Generation failed"}
 
@@ -110,7 +121,8 @@ def make_me():
         response = {
             "status": "success",
             "model": model_str,
-            "prompt": prompt,
+            "original_prompt": prompt,
+            "translated_prompt": translated_prompt,
             "image_base64": base64_str,
             "image_format": "jpeg"
         }
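
For context, the change routes every prompt through translatepy before it reaches the model. Below is a minimal, self-contained sketch of that translation step, assuming translatepy is installed; the stub model and the sample prompt are illustrative only and are not part of the Space's code.

from translatepy import Translator

translator = Translator()

def translate_prompt(prompt: str) -> str:
    # translatepy auto-detects the source language; str() on the result
    # yields the translated text, mirroring the call used in app.py.
    return str(translator.translate(prompt, 'English'))

# Hypothetical stand-in for models_load[model_str](...), for illustration only.
def fake_model(full_prompt: str) -> str:
    return f"<image generated from: {full_prompt}>"

if __name__ == "__main__":
    klir = '| ultra detail, ultra elaboration, ultra quality, perfect'
    prompt = "un gato astronauta flotando en el espacio"  # Spanish input
    print(fake_model(f'{translate_prompt(prompt)} {klir} 12345'))

With this commit, both the api_generate handler and the Gradio generate_wrapper also return original_prompt and translated_prompt in their JSON responses, so clients can see what text was actually sent to the model.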