import os
import time
import torch
import gradio as gr
from diffusers import FluxPipeline
from huggingface_hub import login
# Log in to the Hugging Face account if a token is available
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])
else:
    # Optional: warn when no token is set
    print("HF_TOKEN not found, cannot log in")
# The rest of the code continues below...
# Constants
MODEL_ID = "black-forest-labs/FLUX.1-dev" # Base model
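# Note: FLUX.1-dev is a gated model on the Hub, so the HF_TOKEN used above should
# belong to an account that has accepted its license.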
YOUR_LORA = "anuraj-sisyphus/avatar-loras" # Your LoRA model
DEFAULT_PROMPT = "a portrait of a person with realistic details, high quality"
DEFAULT_NEG_PROMPT = "low quality, blurry, distorted, deformed features"
# Create a list of available LoRAs
# You can expand this with other compatible LoRAs if desired
LORAS = [
{
"name": "Avatar LoRAs",
"repo_id": "anuraj-sisyphus/avatar-loras",
"filename": "SLAY1MNSHA.safetensors", # Update this with the actual filename
"base_model": "FLUX.1-dev"
}
]
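# Note: many LoRAs only activate with a trigger word in the prompt (often related to
# the name of the .safetensors file); check the LoRA repo's model card for details.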
# Initialize the pipeline
@torch.inference_mode()
def load_model():
pipe = FluxPipeline.from_pretrained(
MODEL_ID,
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
)
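    # Note: FLUX.1-dev in bfloat16 needs a large-VRAM GPU; if memory is tight,
    # calling pipe.enable_model_cpu_offload() here instead of pipe.to("cuda")
    # below is one option.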
if torch.cuda.is_available():
pipe = pipe.to("cuda")
return pipe
# Generate image function
def generate_image(
prompt,
negative_prompt,
lora_selection,
lora_scale=0.8,
guidance_scale=5.0,
steps=30,
width=1024,
height=1024,
seed=None
):
# Load model if not already loaded
global pipe
if "pipe" not in globals():
pipe = load_model()
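    # Lazy loading keeps startup fast: the model is only downloaded and moved
    # to the GPU on the first generation request.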
# Set the seed for reproducibility
if seed is None or seed == 0:
seed = int(time.time()) % 100000
generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
# Find the selected LoRA details
selected_lora = None
for lora in LORAS:
if lora["name"] == lora_selection:
selected_lora = lora
break
    if selected_lora:
        # Revert any previously fused/loaded LoRA so scales don't stack across calls
        try:
            pipe.unfuse_lora()
            pipe.unload_lora_weights()
        except Exception:
            pass
# Load the selected LoRA
pipe.load_lora_weights(
selected_lora["repo_id"],
weight_name=selected_lora.get("filename", None)
)
        # Fuse the LoRA into the base weights at the chosen scale
        pipe.fuse_lora(lora_scale=lora_scale)
# Generate the image
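    # Note: FLUX.1-dev is guidance-distilled; in diffusers, negative_prompt only
    # takes effect when true classifier-free guidance is enabled (true_cfg_scale > 1),
    # and older diffusers releases may not accept the argument at all.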
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
guidance_scale=guidance_scale,
num_inference_steps=steps,
width=width,
height=height,
generator=generator
).images[0]
return image, seed
# Create the Gradio interface
with gr.Blocks(title="Avatar LoRAs Explorer") as demo:
gr.Markdown("# Avatar LoRAs Explorer")
gr.Markdown("Generate images using the Avatar LoRAs model. Adjust settings to customize your results.")
with gr.Row():
with gr.Column(scale=2):
prompt = gr.Textbox(
label="Prompt",
placeholder="Enter your prompt here...",
value=DEFAULT_PROMPT,
lines=3
)
negative_prompt = gr.Textbox(
label="Negative Prompt",
placeholder="Enter what you don't want to see...",
value=DEFAULT_NEG_PROMPT,
lines=2
)
with gr.Row():
lora_selection = gr.Dropdown(
label="Select LoRA Model",
choices=[lora["name"] for lora in LORAS],
value=LORAS[0]["name"]
)
lora_scale = gr.Slider(
label="LoRA Scale",
minimum=0.0,
maximum=1.5,
step=0.05,
value=0.8
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=1.0,
maximum=15.0,
step=0.5,
value=5.0
)
steps = gr.Slider(
label="Steps",
minimum=10,
maximum=100,
step=1,
value=30
)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=512,
maximum=1536,
step=64,
value=1024
)
height = gr.Slider(
label="Height",
minimum=512,
maximum=1536,
step=64,
value=1024
)
seed = gr.Number(
label="Seed (0 for random)",
value=0,
precision=0
)
generate_button = gr.Button("Generate Image", variant="primary")
with gr.Column(scale=2):
output_image = gr.Image(label="Generated Image", type="pil")
used_seed = gr.Number(label="Used Seed", value=0, precision=0)
# Setup the button click event
generate_button.click(
fn=generate_image,
inputs=[
prompt,
negative_prompt,
lora_selection,
lora_scale,
guidance_scale,
steps,
width,
height,
seed
],
outputs=[output_image, used_seed]
)
# Add examples if you have any
gr.Examples(
examples=[
[
"a portrait photo of a person with blue eyes",
DEFAULT_NEG_PROMPT,
LORAS[0]["name"],
0.8,
5.0,
30,
1024,
1024,
42
]
],
inputs=[
prompt,
negative_prompt,
lora_selection,
lora_scale,
guidance_scale,
steps,
width,
height,
seed
],
outputs=[output_image, used_seed]
)
# Launch the app
if __name__ == "__main__":
    demo.launch()