erhanmeydan committed
Commit e2667fe · verified · 1 Parent(s): 3491efd

Create app.py

Files changed (1)
app.py +222 -0
app.py ADDED
@@ -0,0 +1,222 @@
import os
import torch
import gradio as gr
from diffusers import FluxPipeline
import json
from huggingface_hub import hf_hub_download
import time

# Constants
MODEL_ID = "black-forest-labs/FLUX.1-dev"  # Base model
YOUR_LORA = "anuraj-sisyphus/avatar-loras"  # Your LoRA model
DEFAULT_PROMPT = "a portrait of a person with realistic details, high quality"
DEFAULT_NEG_PROMPT = "low quality, blurry, distorted, deformed features"

# Create a list of available LoRAs
# You can expand this with other compatible LoRAs if desired
LORAS = [
    {
        "name": "Avatar LoRAs",
        "repo_id": "anuraj-sisyphus/avatar-loras",
        "filename": "SLAY1MNSHA.safetensors",  # Update this with the actual filename
        "base_model": "FLUX.1-dev"
    }
]

# Initialize the pipeline
@torch.inference_mode()
def load_model():
    pipe = FluxPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
    )

    if torch.cuda.is_available():
        pipe = pipe.to("cuda")

    return pipe

# Generate image function (run under inference mode, matching load_model above)
@torch.inference_mode()
def generate_image(
    prompt,
    negative_prompt,
    lora_selection,
    lora_scale=0.8,
    guidance_scale=5.0,
    steps=30,
    width=1024,
    height=1024,
    seed=None
):
    # Load model if not already loaded
    global pipe
    if "pipe" not in globals():
        pipe = load_model()

    # Set the seed for reproducibility
    if seed is None or seed == 0:
        seed = int(time.time()) % 100000
    generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)

    # Find the selected LoRA details
    selected_lora = None
    for lora in LORAS:
        if lora["name"] == lora_selection:
            selected_lora = lora
            break

    if selected_lora:
        # Unfuse and unload any previously applied LoRA so scales do not stack
        try:
            pipe.unfuse_lora()
            pipe.unload_lora_weights()
        except Exception:
            pass

        # Load the selected LoRA
        pipe.load_lora_weights(
            selected_lora["repo_id"],
            weight_name=selected_lora.get("filename", None)
        )

        # Fuse the LoRA into the base weights at the chosen scale
        pipe.fuse_lora(lora_scale=lora_scale)

    # Generate the image
    # Note: negative_prompt is only accepted by recent diffusers releases of
    # FluxPipeline; drop the argument if your installed version rejects it.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]

    return image, seed

# Create the Gradio interface
with gr.Blocks(title="Avatar LoRAs Explorer") as demo:
    gr.Markdown("# Avatar LoRAs Explorer")
    gr.Markdown("Generate images using the Avatar LoRAs model. Adjust settings to customize your results.")

    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your prompt here...",
                value=DEFAULT_PROMPT,
                lines=3
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="Enter what you don't want to see...",
                value=DEFAULT_NEG_PROMPT,
                lines=2
            )

            with gr.Row():
                lora_selection = gr.Dropdown(
                    label="Select LoRA Model",
                    choices=[lora["name"] for lora in LORAS],
                    value=LORAS[0]["name"]
                )
                lora_scale = gr.Slider(
                    label="LoRA Scale",
                    minimum=0.0,
                    maximum=1.5,
                    step=0.05,
                    value=0.8
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1.0,
                    maximum=15.0,
                    step=0.5,
                    value=5.0
                )
                steps = gr.Slider(
                    label="Steps",
                    minimum=10,
                    maximum=100,
                    step=1,
                    value=30
                )

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=1536,
                    step=64,
                    value=1024
                )
                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=1536,
                    step=64,
                    value=1024
                )

            seed = gr.Number(
                label="Seed (0 for random)",
                value=0,
                precision=0
            )

            generate_button = gr.Button("Generate Image", variant="primary")

        with gr.Column(scale=2):
            output_image = gr.Image(label="Generated Image", type="pil")
            used_seed = gr.Number(label="Used Seed", value=0, precision=0)

    # Set up the button click event
    generate_button.click(
        fn=generate_image,
        inputs=[
            prompt,
            negative_prompt,
            lora_selection,
            lora_scale,
            guidance_scale,
            steps,
            width,
            height,
            seed
        ],
        outputs=[output_image, used_seed]
    )

    # Example inputs
    gr.Examples(
        examples=[
            [
                "a portrait photo of a person with blue eyes",
                DEFAULT_NEG_PROMPT,
                LORAS[0]["name"],
                0.8,
                5.0,
                30,
                1024,
                1024,
                42
            ]
        ],
        inputs=[
            prompt,
            negative_prompt,
            lora_selection,
            lora_scale,
            guidance_scale,
            steps,
            width,
            height,
            seed
        ],
        outputs=[output_image, used_seed]
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
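
For a quick smoke test outside the Gradio UI, generate_image can also be called directly. The snippet below is a minimal sketch, not part of the committed file: it assumes app.py is on the import path (the __main__ guard keeps the import from auto-launching the UI), that access to the gated black-forest-labs/FLUX.1-dev weights has been granted (a Hugging Face token may be required), and that enough GPU memory is available; the output filename is purely illustrative.

# Minimal usage sketch: exercise the generation function without the UI.
# Assumes app.py is importable from the working directory and that the
# FLUX.1-dev weights can be downloaded (gated repo; HF token may be needed).
from app import generate_image, LORAS, DEFAULT_NEG_PROMPT

image, used_seed = generate_image(
    prompt="a portrait photo of a person with blue eyes",
    negative_prompt=DEFAULT_NEG_PROMPT,
    lora_selection=LORAS[0]["name"],
    lora_scale=0.8,
    guidance_scale=5.0,
    steps=30,
    width=1024,
    height=1024,
    seed=42,
)
image.save("sample_avatar.png")  # illustrative output path
print("Generated with seed", used_seed)

The first call is slow because the full pipeline has to be downloaded and loaded; subsequent calls reuse the cached global pipe object.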