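"""Gradio web UI for image generation via the Prodia Stable Diffusion API.

Exposes txt2img and img2img tabs backed by a small REST client; Russian
prompts are auto-translated to English before being submitted.
"""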
import gradio as gr
import requests
import time
import json
import base64
import os
from io import BytesIO
import re
from deep_translator import GoogleTranslator
from langdetect import detect
class Prodia:
    """Thin client for the Prodia v1 REST API."""

    def __init__(self, api_key, base=None):
        self.base = base or "https://api.prodia.com/v1"
        self.headers = {
            "X-Prodia-Key": api_key
        }
def generate(self, params):
response = self._post(f"{self.base}/sd/generate", params)
return response.json()
def transform(self, params):
response = self._post(f"{self.base}/sd/transform", params)
return response.json()
def controlnet(self, params):
response = self._post(f"{self.base}/sd/controlnet", params)
return response.json()
def get_job(self, job_id):
response = self._get(f"{self.base}/job/{job_id}")
return response.json()
    def wait(self, job):
        # Poll the job status every 250 ms until it succeeds or fails.
        job_result = job
        while job_result['status'] not in ['succeeded', 'failed']:
            time.sleep(0.25)
            job_result = self.get_job(job['job'])
        return job_result
def list_models(self):
response = self._get(f"{self.base}/sd/models")
return response.json()
def list_samplers(self):
response = self._get(f"{self.base}/sd/samplers")
return response.json()
def _post(self, url, params):
headers = {
**self.headers,
"Content-Type": "application/json"
}
response = requests.post(url, headers=headers, data=json.dumps(params))
if response.status_code != 200:
raise Exception(f"Bad Prodia Response: {response.status_code}")
return response
def _get(self, url):
response = requests.get(url, headers=self.headers)
if response.status_code != 200:
raise Exception(f"Bad Prodia Response: {response.status_code}")
return response
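
# Typical flow (illustrative sketch, not executed here): submit a job, poll
# until it finishes, then read the result URL.
#
#   client = Prodia(api_key="...")
#   job = client.generate({"prompt": "a lighthouse at dusk", "model": "..."})
#   print(client.wait(job)["imageUrl"])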
def image_to_base64(image):
# Convert the image to bytes
buffered = BytesIO()
image.save(buffered, format="WEBP") # You can change format to PNG if needed
# Encode the bytes to base64
img_str = base64.b64encode(buffered.getvalue())
return img_str.decode('utf-8') # Convert bytes to string
def remove_id_and_ext(text):
    """Strip the trailing '[id]' tag and the file extension from a model name."""
    text = re.sub(r'\s*\[.*?\]$', '', text)
    return re.sub(r'\.(safetensors|ckpt)$', '', text)
def get_data(text):
    """Parse an A1111-style generation-parameters string into a dict."""
    patterns = {
        'prompt': r'(.*)',
        'negative_prompt': r'Negative prompt: (.*)',
        'steps': r'Steps: (\d+),',
        'seed': r'Seed: (\d+),',
        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
        'model': r'Model:\s*([^\s,]+)',
        'cfg_scale': r'CFG scale:\s*([\d\.]+)',
        'size': r'Size:\s*([0-9]+x[0-9]+)'
    }
    results = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, text)
        results[key] = match.group(1) if match else None
    if results['size'] is not None:
        results['w'], results['h'] = results['size'].split("x")
    else:
        results['w'] = results['h'] = None
    return results
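# Illustrative example with a hypothetical A1111-style "infotext" string:
#   get_data("a cat\nNegative prompt: blurry\nSteps: 30, Sampler: Euler a, "
#            "CFG scale: 7, Seed: 42, Size: 512x768, Model: epicrealism")
#   -> prompt='a cat', negative_prompt='blurry', steps='30', ..., w='512', h='768'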
prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
model_list = prodia_client.list_models()
# Map display names (id and extension stripped) back to full model identifiers.
model_names = {}
for model_name in model_list:
    name_without_ext = remove_id_and_ext(model_name)
    model_names[name_without_ext] = model_name
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
    # Prodia expects English prompts, so auto-translate Russian input.
    if detect(prompt) == 'ru':
        prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
        print(prompt)
result = prodia_client.generate({
"prompt": prompt,
"negative_prompt": negative_prompt,
"model": model,
"steps": steps,
"sampler": sampler,
"cfg_scale": cfg_scale,
"width": width,
"height": height,
"seed": seed
})
job = prodia_client.wait(result)
return job["imageUrl"]
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
result = prodia_client.transform({
"imageData": image_to_base64(input_image),
"denoising_strength": denoising,
"prompt": prompt,
"negative_prompt": negative_prompt,
"model": model,
"steps": steps,
"sampler": sampler,
"cfg_scale": cfg_scale,
"width": width,
"height": height,
"seed": seed
})
job = prodia_client.wait(result)
return job["imageUrl"]
css = """
footer {visibility: hidden !important;}
#container{
margin: 0 auto;
max-width: 40rem;
}
#intro{
max-width: 100%;
text-align: center;
margin: 0 auto;
}
div.svelte-vt1mxs {
display: flex;
position: relative;
flex-direction: column
}
div.svelte-vt1mxs>*,div.svelte-vt1mxs>.form > * {
width: var(--size-full)
}
.gap.svelte-vt1mxs {
gap: var(--layout-gap)
}
.hide.svelte-vt1mxs {
display: none
}
.compact.svelte-vt1mxs>*,.compact.svelte-vt1mxs .box {
border-radius: 0
}
.compact.svelte-vt1mxs,.panel.svelte-vt1mxs {
border: solid var(--panel-border-width) var(--panel-border-color);
border-radius: var(--container-radius);
background: var(--panel-background-fill);
padding: var(--spacing-lg)
}
div#component-24 {
display: none;
}
div#component-8 {
    background: #00000024;
    border: 0;
    color: #ffffff;
    backdrop-filter: blur(20px);
    -webkit-backdrop-filter: blur(20px);
    border-width: 0 !important;
}
span.md.svelte-9tftx4 {
display: none;
}
.empty.svelte-lk9eg8.large.unpadded_box {
background: none !important;
}
div#component-26 {
display: none;
}
div#component-7 {
background: none;
}
.wrap.default.full.svelte-119qaqt.hide {
background: none !important;
}
.styler.svelte-iyf88w {
background: none !important;
}
div#component-3 {
background: none !important;
border: 0;
}
div#component-9 {
border: 0 !important;
}
input.scroll-hide.svelte-1f354aw {
overflow: hidden !important;
}
div#component-5 {
border-radius: 40px;
background: transparent !important;
opacity: 1;
}
#component-6 {
border-radius: 40px;
background: #d7661500;
border: none;
border-left: 0;
font-size: 30px;
letter-spacing: -1px;
position: relative;
z-index: 1;
backdrop-filter: none;
-webkit-backdrop-filter: none;
display: block;
}
div#component-0 {
max-width: 100% !important;
}
.grid-wrap.svelte-1b19cri.fixed-height {
max-height: 100% !important;
overflow: auto;
}
footer.svelte-1ax1toq {
display: none !important;
}
input.scroll-hide.svelte-1f354aw {
font-size: 26px;
padding: 25px;
}
div#component-4 {
margin-top: 230px;
margin-bottom: 30px;
}
gradio-app {
background-color: transparent !important;
background: url(https://vivawaves.com/wavesweaveslogo.svg) top center no-repeat !important;
margin-top: 77px;
}
body {
background: url(https://vivawaves.com/vivatodaybg2.jpg);
background-size: cover;
}
.preview.svelte-1b19cri {
background: #0000004d !important;
border-radius: 20px;
padding: 20px;
overflow: hidden;
}
button.svelte-1030q2h {
border-radius: 100%;
}
img.svelte-1b19cri {
border-radius: 10px;
}
.form.svelte-sfqy0y {
background: #fff0;
border-width: 0px;
opacity: 0.8;
}
.gradio-container-3-44-2,.gradio-container-3-44-2 *,.gradio-container-3-44-2 :before,.gradio-container-3-44-2 :after {
box-sizing: border-box;
border-width: 0;
border-style: solid;
}
div#component-13 {
display: none;
}
footer.svelte-mpyp5e {
display: none !important;
}
div#intro {
display: none;
}
div.svelte-15lo0d8 {
display: flex;
flex-wrap: wrap;
gap: 0 !important;
width: var(--size-full);
flex-direction: initial;
justify-content: center;
align-items: baseline;
}
input.svelte-1f354aw.svelte-1f354aw, textarea.svelte-1f354aw.svelte-1f354aw {
    display: block;
    position: relative;
    outline: none !important;
    box-shadow: var(--input-shadow);
    padding: var(--input-padding);
    width: 100%;
    color: var(--body-text-color);
    font-weight: var(--input-text-weight);
    font-size: 23px !important;
    line-height: initial;
    border: none;
    text-size-adjust: auto;
    border-radius: 30px;
    background: white !important;
    text-align: center;
}
div#component-8 {
margin-bottom: 70px;
margin-top: 210px;
}
div#component-15 {display: none;}
div#component-18 {
display: none;
}
div#component-1 {
display: none;
}
button.selected.svelte-kqij2n {
display: none;
}
button.svelte-kqij2n {
display: none;
}
.tab-nav.scroll-hide.svelte-kqij2n {
display: none;
}
.svelte-vt1mxs.gap {
border-radius: 20px;
}
div#component-6 {padding: 26px;}
button#generate {
    background: #eb7623;
    border-radius: 40px;
    padding: 16px;
    color: #fff;
    font-size: large;
    border: 2px solid #ff7600;
    border-top: 0;
    box-shadow: 0 12px 10px -10px #ff7600;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Row():
with gr.Accordion(label="Models", open=False):
model = gr.Radio(interactive=True, value="epicrealism_naturalSinRC1VAE.safetensors [90a4c676]", show_label=True, choices=prodia_client.list_models())
with gr.Tabs() as tabs:
with gr.Tab("txt2img", id='t2i'):
with gr.Row():
with gr.Column(scale=3):
with gr.Tab("Основные настройки"):
with gr.Column(scale=6, min_width=600):
prompt = gr.Textbox("", placeholder="Take a deep breath and take your time describing your weave... Be as vague or specific as you want. 💜✨ ", show_label=False, lines=3)
negative_prompt = gr.Textbox(placeholder="Here you can describe anything that you would like NOT to see. ", show_label=False, lines=1, value="")
with gr.Row():
with gr.Column(scale=1):
steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=50, value=40, step=1)
with gr.Row():
with gr.Column(scale=1):
                                    width = gr.Slider(label="Width", minimum=15, maximum=1024, value=1024, step=8)
                                    height = gr.Slider(label="Height", minimum=15, maximum=1024, value=1024, step=8)
with gr.Tab("Расширенные настройки"):
with gr.Row():
with gr.Column(scale=1):
sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
seed = gr.Slider(label="Seed", minimum=-1, maximum=10000000, value=-1)
text_button = gr.Button("Weave", variant='primary', elem_id="generate")
image_output = gr.Image()
text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output)
with gr.Tab("img2img", id='i2i'):
with gr.Row():
with gr.Column(scale=3):
with gr.Tab("Основные настройки"):
i2i_image_input = gr.Image(type="pil")
with gr.Column(scale=6, min_width=600):
i2i_prompt = gr.Textbox("", placeholder="Prompt", show_label=False, lines=3)
i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry")
with gr.Row():
with gr.Column(scale=1):
i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=50, value=30, step=1)
with gr.Row():
with gr.Column(scale=1):
                                    i2i_width = gr.Slider(label="Width", minimum=15, maximum=1024, value=512, step=8)
                                    i2i_height = gr.Slider(label="Height", minimum=15, maximum=1024, value=512, step=8)
with gr.Tab("Расширенные настройки"):
with gr.Row():
with gr.Column(scale=1):
i2i_sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                                i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
i2i_seed = gr.Slider(label="Seed", minimum=-1, maximum=10000000, value=-1)
                    i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")
i2i_image_output = gr.Image()
i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt, model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height, i2i_seed], outputs=i2i_image_output)
demo.queue(concurrency_count=64, max_size=80, api_open=False).launch(max_threads=256)