support fal kontext loras (#5)
- support fal kontext loras (202822dc71abb5fc2e5ee72f217462572bb0749b)
- Update flux_loras.json (891349b92c35d39d844bdae63a3890dbaadf8042)
- Update app.py (aaa7343a0099889325075716d27f0574b8b042df)
- Update app.py (db8e7fdb6ebb1906e29db3d2632c9587c04513f5)
- Update app.py (df43181c95e41b1cf6f48b827f59fe3b0f95b0df)
- app.py +19 -12
- flux_loras.json +27 -0
- requirements.txt +3 -2
app.py
CHANGED
@@ -30,6 +30,7 @@ with open("flux_loras.json", "r") as file:
             "trigger_word": item.get("trigger_word", ""),
             "trigger_position": item.get("trigger_position", "prepend"),
             "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
+            "lora_type": item.get("lora_type", "flux"),
         }
         for item in data
     ]
@@ -126,12 +127,12 @@ def classify_gallery(flux_loras):
     sorted_gallery = sorted(flux_loras, key=lambda x: x.get("likes", 0), reverse=True)
     return [(item["image"], item["title"]) for item in sorted_gallery], sorted_gallery
 
-def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.75, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora_wrapper(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.75, portrait_mode=False, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Wrapper function to handle state serialization"""
-    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, flux_loras, progress)
+    return infer_with_lora(input_image, prompt, selected_index, custom_lora, seed, randomize_seed, guidance_scale, lora_scale, portrait_mode, flux_loras, progress)
 
 @spaces.GPU
-def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
+def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, randomize_seed=False, guidance_scale=2.5, lora_scale=1.0, portrait_mode=False, flux_loras=None, progress=gr.Progress(track_tqdm=True)):
     """Generate image with selected LoRA"""
     global current_lora, pipe
 
@@ -169,13 +170,19 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
     input_image = input_image.convert("RGB")
     # Add trigger word to prompt
     trigger_word = lora_to_use["trigger_word"]
-    # (seven lines of the previous prompt-construction logic removed here; their content is not captured in the rendered diff)
+    is_kontext_lora = lora_to_use["lora_type"] == "kontext"
+    if not is_kontext_lora:
+        if trigger_word == ", How2Draw":
+            prompt = f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
+        elif trigger_word == ", video game screenshot in the style of THSMS":
+            prompt = f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
+        else:
+            prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
+    else:
+        if portrait_mode:
+            prompt = f"{trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features."
+        else:
+            prompt = f"{trigger_word}. {prompt}."
     try:
         image = pipe(
             image=input_image,
@@ -242,7 +249,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Column(scale=4, elem_id="box_column"):
            with gr.Group(elem_id="gallery_box"):
                 input_image = gr.Image(label="Upload a picture of yourself", type="pil", height=300)
-
+                portrait_mode = gr.Checkbox(label="portrait mode", value=True)
                 gallery = gr.Gallery(
                     label="Pick a LoRA",
                     allow_preview=False,
@@ -328,7 +335,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer_with_lora_wrapper,
-        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, gr_flux_loras],
+        inputs=[input_image, prompt, selected_state, custom_loaded_lora, seed, randomize_seed, guidance_scale, lora_scale, portrait_mode, gr_flux_loras],
         outputs=[result, seed, reuse_button]
     )
 
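The core of this change is the branch on `lora_type`: existing FLUX LoRAs keep the old style-transfer phrasing built around the trigger word, while fal Kontext LoRAs use their trigger word directly as an edit instruction, with the new "portrait mode" checkbox appending an identity-preservation clause. A minimal sketch of that logic as a standalone helper, taken from the diff above; the name `build_prompt` is chosen here for illustration and does not exist in app.py:

def build_prompt(lora_to_use: dict, prompt: str, portrait_mode: bool) -> str:
    """Sketch of the prompt construction added in this commit (helper name is illustrative)."""
    trigger_word = lora_to_use["trigger_word"]
    # Entries loaded from flux_loras.json default to "flux", so only entries that
    # explicitly set "lora_type": "kontext" take the new branch.
    is_kontext_lora = lora_to_use.get("lora_type", "flux") == "kontext"
    if not is_kontext_lora:
        # Pre-existing FLUX-style LoRAs: wrap the trigger word in style-transfer phrasing.
        if trigger_word == ", How2Draw":
            return f"create a How2Draw sketch of the person of the photo {prompt}, maintain the facial identity of the person and general features"
        elif trigger_word == ", video game screenshot in the style of THSMS":
            return f"create a video game screenshot in the style of THSMS with the person from the photo, {prompt}. maintain the facial identity of the person and general features"
        return f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features, while still changing the overall style to {trigger_word}."
    # fal Kontext LoRAs: the trigger word is already an edit instruction; portrait mode
    # adds an identity-preservation clause.
    if portrait_mode:
        return f"{trigger_word} while maintaining the identity of the person. {prompt}. Make sure to maintain the person's facial identity and features."
    return f"{trigger_word}. {prompt}."


# Example: a kontext entry from flux_loras.json with portrait mode enabled.
example_lora = {"trigger_word": "Convert to plushie style", "lora_type": "kontext"}
print(build_prompt(example_lora, "a person smiling", portrait_mode=True))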
flux_loras.json
CHANGED
@@ -1,4 +1,31 @@
 [
+    {
+        "image": "https://huggingface.co/fal/Wojak-Kontext-Dev-LoRA/resolve/main/images/2.png",
+        "title": "Wojak",
+        "repo": "fal/Wojak-Kontext-Dev-LoRA",
+        "weights": "wojak-kontext-dev-lora.safetensors",
+        "trigger_word": "Convert to wojak style drawing",
+        "trigger_position": "prepend",
+        "lora_type": "kontext"
+    },
+    {
+        "image": "https://huggingface.co/fal/Broccoli-Hair-Kontext-Dev-LoRA/resolve/main/images/3.png",
+        "title": "Broccoli Hair",
+        "repo": "fal/Broccoli-Hair-Kontext-Dev-LoRA",
+        "weights": "broccoli-hair-kontext-dev-lora.safetensors",
+        "trigger_word": "Change hair to a broccoli haircut",
+        "trigger_position": "prepend",
+        "lora_type": "kontext"
+    },
+    {
+        "image": "https://huggingface.co/fal/Plushie-Kontext-Dev-LoRA/resolve/main/images/1.png",
+        "title": "Plushie",
+        "repo": "fal/Plushie-Kontext-Dev-LoRA",
+        "weights": "plushie-kontext-dev-lora.safetensors",
+        "trigger_word": "Convert to plushie style",
+        "trigger_position": "prepend",
+        "lora_type": "kontext"
+    },
     {
         "image":"lora_examples/yarn.png",
         "title":"Yarn Art",
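Each new entry follows the same schema as the existing gallery LoRAs plus the new "lora_type" field, which the loader in app.py (first hunk above) treats as optional. A minimal sketch of how such an entry is consumed, assuming the `image`/`title`/`repo` pass-through shown here mirrors the surrounding code rather than this diff:

import json

# Sketch only: normalize gallery entries the way the first app.py hunk does,
# falling back to defaults for fields older entries do not define.
with open("flux_loras.json", "r") as file:
    data = json.load(file)

flux_loras_raw = [
    {
        "image": item["image"],   # assumed pass-through, not shown in this hunk
        "title": item["title"],   # assumed pass-through, not shown in this hunk
        "repo": item["repo"],     # assumed pass-through, not shown in this hunk
        "trigger_word": item.get("trigger_word", ""),
        "trigger_position": item.get("trigger_position", "prepend"),
        "weights": item.get("weights", "pytorch_lora_weights.safetensors"),
        "lora_type": item.get("lora_type", "flux"),  # new field; older entries stay "flux"
    }
    for item in data
]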
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
 transformers
-git+https://github.com/huggingface/diffusers.git
 accelerate
 safetensors
 sentencepiece
-peft
+peft
+
+git+https://github.com/linoytsaban/diffusers.git@kontext-lora
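The dependency change swaps upstream diffusers for the linoytsaban/diffusers `kontext-lora` branch and keeps peft, which diffusers requires for LoRA loading. A minimal, hypothetical sketch of loading one of the fal Kontext LoRAs listed above, assuming the pinned branch exposes the same `FluxKontextPipeline` and `load_lora_weights` API as upstream diffusers; the base model id is an assumption and does not appear in this diff:

import torch
from diffusers import FluxKontextPipeline  # assumed to be provided by the pinned branch

# Assumption: the Space builds on FLUX.1 Kontext dev; repo and weight name come from
# the Plushie entry in flux_loras.json above. peft must be installed for LoRA loading.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

pipe.load_lora_weights(
    "fal/Plushie-Kontext-Dev-LoRA", weight_name="plushie-kontext-dev-lora.safetensors"
)

# image = pipe(image=input_image, prompt=prompt, guidance_scale=2.5).images[0]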